gt
stringclasses
1 value
context
stringlengths
2.49k
119k
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from heat.engine.components import Component
from heat.engine.components import Components
from heat.tests.common import HeatTestCase


class ComponentTest(HeatTestCase):
    """Unit tests for a single software-config Component."""

    def test_init(self):
        # A bare Component gets the default type and empty collections.
        component = Component()
        self.assertEqual(component.type, 'OS::Heat::SoftwareConfig')
        self.assertEqual(component.properties, {})
        self.assertEqual(component.scripts, {})
        self.assertEqual(component.relations, [])
        self.assertEqual(component.hosted_on(), None)
        self.assertEqual(component.depends(), [])

    def test_hosted_on(self):
        # hosted_on() surfaces the target of a hosted_on relationship.
        component = Component({
            'relationships': [{'hosted_on': 'wordpress'}]
        })
        self.assertEqual(component.hosted_on(), 'wordpress')

    def test_depends(self):
        # depends() lists every depends_on target, in declaration order.
        component = Component({
            'relationships': [{'depends_on': 'config_mysql'}]
        })
        self.assertEqual(component.depends(), ['config_mysql'])

        component['relationships'].append({'depends_on': 'config_wordpress'})
        self.assertEqual(component.depends(),
                         ['config_mysql', 'config_wordpress'])


class ComponentsTest(HeatTestCase):
    """Unit tests for a Components collection."""

    def test_init(self):
        schema = {}
        collection = Components(schema)
        self.assertEqual(0, len(collection))

        schema['config_mysql'] = {}
        collection = Components(schema)
        self.assertEqual(1, len(collection))
        self.assertIsInstance(collection['config_mysql'], Component)

    def test_depends(self):
        collection = Components({
            'install_mysql': {},
            'config_mysql': {
                'relationships': [{'depends_on': 'install_mysql'}]
            },
            'start_mysql': {
                'relationships': [{'depends_on': 'config_mysql'}]
            },
        })
        self.assertEqual(3, len(collection))
        dependencies = collection.depends()
        self.assertEqual(2, len(dependencies))
        self.assertIn('install_mysql', dependencies)
        self.assertIn('config_mysql', dependencies)

    def test_multi_depends(self):
        collection = Components({
            'install_mysql': {},
            'config_mysql': {
                'relationships': [{'depends_on': 'install_mysql'}]
            },
            'start_mysql': {
                'relationships': [{'depends_on': 'config_mysql'}]
            },
            'install_wordpress': {},
            'config_wordpress': {
                'relationships': [{'depends_on': 'install_wordpress'}]
            },
            'start_wordpress': {
                'relationships': [
                    {'depends_on': 'config_wordpress'},
                    {'depends_on': 'start_mysql'},
                ]
            },
        })
        dependencies = collection.depends()
        # Everything except the final component is some dependency.
        self.assertEqual(5, len(dependencies))
        self.assertNotIn('start_wordpress', dependencies)
        self.assertIn('install_wordpress', dependencies)
        self.assertIn('config_wordpress', dependencies)
        self.assertIn('start_mysql', dependencies)
        self.assertIn('config_mysql', dependencies)
        self.assertIn('install_mysql', dependencies)

    def test_filter(self):
        collection = Components({
            'install_mysql': {
                'relationships': [{'hosted_on': 'mysql'}]
            },
            'config_mysql': {
                'relationships': [
                    {'hosted_on': 'mysql'},
                    {'depends_on': 'install_mysql'},
                ]
            },
            'start_mysql': {
                'relationships': [
                    {'hosted_on': 'mysql'},
                    {'depends_on': 'config_mysql'},
                ]
            },
            'install_wordpress': {
                'relationships': [{'hosted_on': 'wordpress'}]
            },
            'config_wordpress': {
                'relationships': [
                    {'hosted_on': 'wordpress'},
                    {'depends_on': 'install_wordpress'},
                ]
            },
            'start_wordpress': {
                'relationships': [
                    {'hosted_on': 'wordpress'},
                    {'depends_on': 'config_wordpress'},
                    {'depends_on': 'start_mysql'},
                ]
            },
        })
        matched = collection.filter('mysql')
        self.assertEqual(3, len(matched))
        self.assertIn('config_mysql', matched)
        self.assertIn('install_mysql', matched)
        self.assertIn('start_mysql', matched)

        matched = collection.filter('wordpress')
        self.assertEqual(3, len(matched))
        self.assertIn('config_wordpress', matched)
        self.assertIn('install_wordpress', matched)
        self.assertIn('start_wordpress', matched)

    def test_validate(self):
        collection = Components({'install_mysql': {}})
        self.assertTrue(collection.validate())

        # A component may not depend on itself.
        collection = Components({
            'config_mysql': {
                'relationships': [{'depends_on': 'config_mysql'}]
            }
        })
        err = self.assertRaises(ValueError, collection.validate)
        self.assertIn('component config_mysql depends on itself.', str(err))

        # Every dependency target must be defined.
        collection = Components({
            'config_mysql': {
                'relationships': [{'depends_on': 'install_mysql'}]
            }
        })
        err = self.assertRaises(ValueError, collection.validate)
        self.assertIn('component install_mysql is not defined.', str(err))

        # Duplicate dependencies are rejected.
        collection = Components({
            'install_mysql': {},
            'config_mysql': {
                'relationships': [
                    {'depends_on': 'install_mysql'},
                    {'depends_on': 'install_mysql'},
                ]
            },
        })
        err = self.assertRaises(ValueError, collection.validate)
        self.assertIn('duplicated install_mysql in config_mysql depends on.',
                      str(err))
#!/usr/bin/python from fvregress import * import string # really? you have to do this? import xmlrpclib wantPause = True test="ports" try: h= FvRegress() port=16633 h.addController("alice", 54321) h.addController("bob", 54322) if len(sys.argv) > 1 : wantPause = False port=int(sys.argv[1]) timeout=60 h.useAlreadyRunningFlowVisor(port) else: wantPause = False timeout=5 h.spawnFlowVisor(configFile="tests-"+test+".xml") h.lamePause() if wantPause: doPause("start tests") # start up a flowvisor with 1 switch and two guests switch_features= FvRegress.OFVERSION + '''06 00 e0 ef be ad de 00 00 00 00 00 00 00 02 00 00 00 80 02 00 00 00 00 00 00 00 00 00 00 00 00 99 32 30 00 00 00 00 70 6f 72 74 20 31 35 33 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 32 31 30 30 30 30 70 6f 72 74 20 31 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 02 32 32 30 30 30 30 70 6f 72 74 20 32 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 03 32 33 30 30 30 30 70 6f 72 74 20 33 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00''' if wantPause: doPause("before adding switch1") h.addSwitch(name='switch1',dpid=2, switch_features=switch_features) if wantPause: doPause("start tests") #################################### Start Tests # # send a packet in from a port that was not listed in switch_features # packet_to_g0_p0 = FvRegress.OFVERSION + '''0a 0052 0000 0000 0000 0101 # 0040 0000 0000 0000 0000 0001 0000 0000 # 0002 0800 4500 0032 0000 0000 40ff f72c # c0a8 0028 c0a8 0128 7a18 586b 1108 97f5 # 19e2 657e 07cc 31c3 11c7 c40c 8b95 5151 # 3354 51d5 0036''' # # should dynamically grow for alice # packet_to_g0_p0 = FvRegress.OFVERSION + '''0a 0052 0000 0001 0000 0101 # 0040 0001 0000 0000 0000 0002 0000 0000 # 0001 0800 4500 0032 0000 0000 40ff f72c # c0a8 0028 c0a8 0128 7a18 586b 
1108 97f5 # 19e2 657e 07cc 31c3 11c7 c40c 8b95 5151 # 3354 51d5 0036''' # # should not be sent to bob (explicitly lists ports) # h.runTest(name="switch1controller packet_in routing - by port", timeout=timeout, events= [ # TestEvent( "send","switch","switch1", packet=packet_to_g0_p0), # TestEvent( "recv","guest","alice", packet=packet_to_g0_p0), # TestEvent( "clear?","guest","bob", packet=""), # ]) ############################################################ # test port stats pruning port_stats_request_before = FvRegress.OFVERSION + \ '''10 00 14 00 00 03 cf 00 04 00 00 ff ff 00 00 00 00 00 00''' port_stats_request_after_alice = FvRegress.OFVERSION + \ '''10 00 14 00 00 01 02 00 04 00 00 ff ff 00 00 00 00 00 00''' port_stats_request_after_bob = FvRegress.OFVERSION + \ '''10 00 14 00 00 01 03 00 04 00 00 ff ff 00 00 00 00 00 00''' port_stats_reply = FvRegress.OFVERSION + \ '''11 01 ac 00 00 01 02 00 04 00 00 00 01 00 00 00 00 00 00 00 00 00 00 00 8a 8c 55 00 00 00 00 00 1b fb 14 00 00 00 02 0b 7a 82 ae 00 00 00 00 0e 2b be 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff 00 03 00 00 00 00 00 00 00 00 00 00 00 86 f2 d5 00 00 00 00 00 75 9e e7 00 00 00 00 2b cf 69 de 00 00 00 01 ed 71 df 6c 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff 00 2d 00 00 00 00 00 00 00 00 00 00 00 26 9c 4e 00 00 00 00 00 1d 95 ea 00 00 00 00 1e 5f 61 6f 00 00 00 00 31 65 e3 b5 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff fe 00 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff 
ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff''' port_stats_reply2 = FvRegress.OFVERSION + \ '''11 01 ac 00 00 01 03 00 04 00 00 00 01 00 00 00 00 00 00 00 00 00 00 00 8a 8c 55 00 00 00 00 00 1b fb 14 00 00 00 02 0b 7a 82 ae 00 00 00 00 0e 2b be 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff 00 03 00 00 00 00 00 00 00 00 00 00 00 86 f2 d5 00 00 00 00 00 75 9e e7 00 00 00 00 2b cf 69 de 00 00 00 01 ed 71 df 6c 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff 00 2d 00 00 00 00 00 00 00 00 00 00 00 26 9c 4e 00 00 00 00 00 1d 95 ea 00 00 00 00 1e 5f 61 6f 00 00 00 00 31 65 e3 b5 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff fe 00 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff''' port_stats_reply_alice = FvRegress.OFVERSION + \ '''11 01 ac 00 00 03 d1 00 04 00 00 00 01 00 00 00 00 00 00 00 00 00 00 00 8a 8c 55 00 00 00 00 00 1b fb 14 00 00 00 02 0b 7a 82 ae 00 00 00 00 0e 2b be 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff 00 03 00 00 00 00 00 00 00 00 00 00 00 86 f2 d5 00 00 00 00 00 75 9e e7 00 00 00 00 2b cf 69 de 00 
00 00 01 ed 71 df 6c 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff 00 2d 00 00 00 00 00 00 00 00 00 00 00 26 9c 4e 00 00 00 00 00 1d 95 ea 00 00 00 00 1e 5f 61 6f 00 00 00 00 31 65 e3 b5 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff fe 00 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff''' port_stats_reply_bob = FvRegress.OFVERSION + \ '''11 00 dc 00 00 03 cf 00 04 00 00 00 01 00 00 00 00 00 00 00 00 00 00 00 8a 8c 55 00 00 00 00 00 1b fb 14 00 00 00 02 0b 7a 82 ae 00 00 00 00 0e 2b be 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff 00 03 00 00 00 00 00 00 00 00 00 00 00 86 f2 d5 00 00 00 00 00 75 9e e7 00 00 00 00 2b cf 69 de 00 00 00 01 ed 71 df 6c 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff''' h.runTest(name="port_stats pruning", timeout=timeout, events= [ # alice sends a port_stats request TestEvent( "send","guest","alice", packet=port_stats_request_before), # fv changes the XID TestEvent( "recv","switch","switch1", packet=port_stats_request_after_alice, strict=True), TestEvent( "send","switch","switch1", packet=port_stats_reply,strict=True), TestEvent( "recv","guest","alice",packet=port_stats_reply_alice), # bob sends a port_stats 
request TestEvent( "send","guest","bob", packet=port_stats_request_before), # fv changes the XID TestEvent( "recv","switch","switch1", packet=port_stats_request_after_bob, strict=True), TestEvent( "send","switch","switch1", packet=port_stats_reply2,strict=True), TestEvent( "recv","guest","bob",packet=port_stats_reply_bob) ]) ############################################################ packet_out_pAll = FvRegress.OFVERSION + '''0d 0058 0000 abcd ffff ffff ffff 0008 0000 0008 fffb 0080 0000 0000 0001 0000 0000 0002 0800 4500 0032 0000 4000 4011 2868 c0a8 c800 c0a8 c901 0001 0000 001e d7c3 cdc0 251b e6dc ea0c 726d 973f 2b71 c2e4 1b6f bc11 8250''' # note, the xid here is a function of the order of the tests; # DO NOT CHANGE test order packet_out_p0_aftr_port0 = FvRegress.OFVERSION+ '''0d 00 70 02 01 00 00 ff ff ff ff ff ff 00 20 00 00 00 08 00 99 00 80 00 00 00 08 00 01 00 80 00 00 00 08 00 02 00 80 00 00 00 08 00 03 00 80 00 00 00 00 00 01 00 00 00 00 00 02 08 00 45 00 00 32 00 00 40 00 40 11 28 68 c0 a8 c8 00 c0 a8 c9 01 00 01 00 00 00 1e d7 c3 cd c0 25 1b e6 dc ea 0c 72 6d 97 3f 2b 71 c2 e4 1b 6f bc 11 82 50''' packet_out_pAll_bob = FvRegress.OFVERSION + '''0d 0058 0000 abcd ffff ffff ffff 0008 0000 0008 fffb 0080 0000 0000 0002 0000 0000 0001 0800 4500 0032 0000 4000 4011 2868 c0a8 c800 c0a8 c901 0001 0000 001e d7c3 cdc0 251b e6dc ea0c 726d 973f 2b71 c2e4 1b6f bc11 8250''' packet_out_pAll_bob_aftr = FvRegress.OFVERSION+ '''0d 00 60 00 00 ab cd ff ff ff ff ff ff 00 10 00 00 00 08 00 01 00 80 00 00 00 08 00 03 00 80 00 00 00 00 00 02 00 00 00 00 00 01 08 00 45 00 00 32 00 00 40 00 40 11 28 68 c0 a8 c8 00 c0 a8 c9 01 00 01 00 00 00 1e d7 c3 cd c0 25 1b e6 dc ea 0c 72 6d 97 3f 2b 71 c2 e4 1b 6f bc 11 82 50''' h.runTest(name="packet_out; valid", timeout=timeout, events= [ # alice sends a FLOOD packet_out; alice has access to all ports TestEvent( "send","guest","alice", packet=packet_out_pAll), # fv expands it to ports=1,153,2,3 (yes, 153... 
to ensure there aren't huge jumps) TestEvent( "recv","switch","switch1", packet=packet_out_p0_aftr_port0), # bob sends a FLOOD packet_out TestEvent( "send","guest","bob", packet=packet_out_pAll_bob), # fv expands it to ports=1,3 TestEvent( "recv","switch","switch1", packet=packet_out_pAll_bob_aftr), ]) ############################################################ packet_out_pInPort = FvRegress.OFVERSION + '''0d 0058 0000 abff ffff ffff 0001 0008 0000 0008 fffb 0080 0000 0000 0001 0000 0000 0002 0800 4500 0032 0000 4000 4011 2868 c0a8 c800 c0a8 c901 0001 0000 001e d7c3 cdc0 251b e6dc ea0c 726d 973f 2b71 c2e4 1b6f bc11 8250''' packet_out_pInPort_aftr = FvRegress.OFVERSION+ '''0d 00 68 04 01 00 00 ff ff ff ff 00 01 00 18 00 00 00 08 00 99 00 80 00 00 00 08 00 02 00 80 00 00 00 08 00 03 00 80 00 00 00 00 00 01 00 00 00 00 00 02 08 00 45 00 00 32 00 00 40 00 40 11 28 68 c0 a8 c8 00 c0 a8 c9 01 00 01 00 00 00 1e d7 c3 cd c0 25 1b e6 dc ea 0c 72 6d 97 3f 2b 71 c2 e4 1b 6f bc 11 82 50''' h.runTest(name="dont flood in_port; valid", timeout=timeout, events= [ TestEvent( "send","guest","alice", packet=packet_out_pInPort), TestEvent( "recv","switch","switch1", packet=packet_out_pInPort_aftr), ]) ############################################################ # dynamically add port 13, emulating HP's dyn port stuff: bug #184 port_mod_add_13 = FvRegress.OFVERSION + '''0c 0040 0000 dead 00 0000 0000 0000 00 000d 0102 0304 0506 8080 8080 2049 4100 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 ''' packet_out_pAll = FvRegress.OFVERSION + '''0d 0058 0000 abcd ffff ffff ffff 0008 0000 0008 fffb 0080 0000 0000 0001 0000 0000 0002 0800 4500 0032 0000 4000 4011 2868 c0a8 c800 c0a8 c901 0001 0000 001e d7c3 cdc0 251b e6dc ea0c 726d 973f 2b71 c2e4 1b6f bc11 8250''' # note, the xid here is a function of the order of the tests; # DO NOT CHANGE test order packet_out_p0_aftr_port0 = FvRegress.OFVERSION+ '''0d 00 78 02 01 00 00 ff ff ff ff ff ff 00 28 00 00 00 08 00 
99 00 80 00 00 00 08 00 01 00 80 00 00 00 08 00 02 00 80 00 00 00 08 00 03 00 80 00 00 00 08 00 0d 00 80 00 00 00 00 00 01 00 00 00 00 00 02 08 00 45 00 00 32 00 00 40 00 40 11 28 68 c0 a8 c8 00 c0 a8 c9 01 00 01 00 00 00 1e d7 c3 cd c0 25 1b e6 dc ea 0c 72 6d 97 3f 2b 71 c2 e4 1b 6f bc 11 82 50''' packet_out_pAll_bob = FvRegress.OFVERSION + '''0d 0058 0000 abcd ffff ffff ffff 0008 0000 0008 fffb 0080 0000 0000 0002 0000 0000 0001 0800 4500 0032 0000 4000 4011 2868 c0a8 c800 c0a8 c901 0001 0000 001e d7c3 cdc0 251b e6dc ea0c 726d 973f 2b71 c2e4 1b6f bc11 8250''' packet_out_pAll_bob_aftr = FvRegress.OFVERSION+ '''0d 00 60 00 00 ab cd ff ff ff ff ff ff 00 10 00 00 00 08 00 01 00 80 00 00 00 08 00 03 00 80 00 00 00 00 00 02 00 00 00 00 00 01 08 00 45 00 00 32 00 00 40 00 40 11 28 68 c0 a8 c8 00 c0 a8 c9 01 00 01 00 00 00 1e d7 c3 cd c0 25 1b e6 dc ea 0c 72 6d 97 3f 2b 71 c2 e4 1b 6f bc 11 82 50''' h.runTest(name="packet_out; flood to dynamic port", timeout=timeout, events= [ # announce that port 13 has been added TestEvent( "send","switch","switch1", packet=port_mod_add_13), TestEvent( "delay","switch","switch1",packet=None), TestEvent( "send" ,"guest","alice", packet=packet_out_pAll), # fv expands it to ports=1,153,2,3,13 -- 13 is new, relative to prev test TestEvent( "recv","switch","switch1", packet=packet_out_p0_aftr_port0), # bob sends a FLOOD packet_out TestEvent( "send","guest","bob", packet=packet_out_pAll_bob), # fv expands it to ports=1,3 # bob should not change TestEvent( "recv","switch","switch1", packet=packet_out_pAll_bob_aftr), ]) ######################################### rpcport=18080 user="fvadmin" passwd="0fw0rk" s = xmlrpclib.ServerProxy("https://" + user + ":" + passwd + "@localhost:" + str(rpcport) + "/xmlrpc") change = { "operation" : "REMOVE", "id" : "3"} print "Sleeping 1 sec to let change propagate" time.sleep(1) ### now remove access from Bob on port 3 if not s.api.changeFlowSpace([change]) : raise "FAILED: FlowSpace Change failed!" 
else : print "SUCCESS: FLowSpace Changed: removed bob's port 3" bob_without_port3 = FvRegress.OFVERSION + '''0d 00 58 00 00 ab cd ff ff ff ff ff ff 00 08 00 00 00 08 00 01 00 80 00 00 00 00 00 02 00 00 00 00 00 01 08 00 45 00 00 32 00 00 40 00 40 11 28 68 c0 a8 c8 00 c0 a8 c9 01 00 01 00 00 00 1e d7 c3 cd c0 25 1b e6 dc ea 0c 72 6d 97 3f 2b 71 c2 e4 1b 6f bc 11 82 50''' h.runTest(name="packet_out; without port3", timeout=timeout, events= [ # bob sends a FLOOD packet_out TestEvent( "send","guest","bob", packet=packet_out_pAll_bob), # fv expands it to just ports=1 (even though it's the same input as before) TestEvent( "recv","switch","switch1", packet=bob_without_port3), ]) ######################################### change = { "operation" : "ADD", "priority" : "200", "dpid":"all", "match":"in_port=2,dl_src=00:00:00:00:00:00:00:01", "actions":"Slice=bob:4"} ### now add access for Bob on port 2 if not s.api.changeFlowSpace([change]) : raise "FAILED: FlowSpace Change failed!" else : print "SUCCESS: FLowSpace Changed: added bob's port 2" bob_with_port2 = FvRegress.OFVERSION + '''0d 00 60 00 00 ab cd ff ff ff ff ff ff 00 10 00 00 00 08 00 01 00 80 00 00 00 08 00 02 00 80 00 00 00 00 00 02 00 00 00 00 00 01 08 00 45 00 00 32 00 00 40 00 40 11 28 68 c0 a8 c8 00 c0 a8 c9 01 00 01 00 00 00 1e d7 c3 cd c0 25 1b e6 dc ea 0c 72 6d 97 3f 2b 71 c2 e4 1b 6f bc 11 82 50''' h.runTest(name="packet_out; with port2", timeout=timeout, events= [ # bob sends a FLOOD packet_out TestEvent( "send","guest","bob", packet=packet_out_pAll_bob), # fv expands it to just ports=1 (even though it's the same input as before) TestEvent( "recv","switch","switch1", packet=bob_with_port2), ]) ######################################### change = { "operation" : "REMOVE", "id" : "5" } ### now remove access from all Alice's ports if not s.api.changeFlowSpace([change]) : raise "FAILED: FlowSpace Change failed!" 
else : print "SUCCESS: FLowSpace Changed: removed Alice's access" h.lamePause("Sleeping to let FV and test suite drop switch", 0.5) h.runTest(name="dropped Alice", timeout=timeout, events= [ # Make sure Alice has no switches connected to her TestEvent( "countSwitches","guest","alice", actorID2=0,packet=None), ]) ######################################### # more tests for this setup HERE #################################### End Tests finally: if wantPause: doPause("start cleanup") h.cleanup()
# Copyright (c) 2013-2015 by California Institute of Technology # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the California Institute of Technology nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH # OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT # OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
"""Mathematical Sets and Power Sets""" from __future__ import print_function import logging import warnings from itertools import chain, combinations try: from collections.abc import Iterable, Hashable, Container except ImportError: from collections import Iterable, Hashable, Container from pprint import pformat from random import randint logger = logging.getLogger(__name__) def compare_lists(list1, list2): """Compare list contents, ignoring ordering. Hashability of elements not assumed, incurring O(N**2) See Also ======== L{MathSet} @type list1: list @type list2: list @return: True if a bijection exists between the lists. Note that this takes into account multiplicity of elements. @rtype: bool """ if not isinstance(list1, list): raise TypeError('Not a list, instead list1:\n\t' + str(list1)) if not isinstance(list2, list): raise TypeError('Not a list, instead list2:\n\t' + str(list2)) dummy_list = list(list1) same_lists = True for item in list2: try: dummy_list.remove(item) except: # unique element not found to be rm'd same_lists = False break # anything remaining ? same_lists = same_lists and not bool(dummy_list) return same_lists class MathSet(object): """Mathematical set, allows unhashable elements. Examples ======== >>> s = MathSet(['a', 1, [1,2], {'a', 'b'} ] ) Then print(s) shows how the elements were separately stored in a set and list, to optimize contains operations: >>> print(s) MathSet(['a', 1, [1, 2], set(['a', 'b'])]) Set operations similar to the builtin type are supported: >>> p = MathSet() >>> p.add(1) >>> p |= [1, 2] >>> p |= {3, 4} >>> p |= [[1, 2], '5', {'a':1} ] >>> p.add_from([5, 6, '7', {8, 9} ] ) >>> p.remove(1) >>> p MathSet([2, 3, 4, 5, 6, '5', '7', [1, 2], {'a': 1}, set([8, 9])]) See Also ======== L{SubSet}, L{PowerSet}, set """ def __init__(self, iterable=[]): """Initialize by adding elements from iterable. 
Example ======= >>> s = MathSet([1, 2, 'a', {3, 4} ] ) @param iterable: iterable from which to initialize the set S which underlies the PowerSet 2^S @type iterable: iterable, any element types allowed """ self._delete_all() self.add_from(iterable) def __repr__(self): return 'MathSet(' + pformat(list(self._set) + self._list) + ')' def _debug_repr(self): set_str = ', '.join([repr(i) for i in self._set]) return 'MathSet({' + set_str + '} +' + str(self._list) + ')' def __or__(self, other): """Union with another mathematical set. See Also ======== L{__ior__} @param other: any other mathematical set. @type other: iterable, elements not restricted to hashable @return: self | iterable @rtype: MathSet """ s = MathSet(self) s.add_from(other) return s def __mul__(self, other): """Return the Cartesian product with another C{MathSet}. Example ======= >>> a = MathSet([1, 2] ) >>> b = MathSet([3, 4] ) >>> c = a *b >>> print(type(c) ) >>> print(c) If we prefer a CartesianProduct returned instead: >>> c = a.cartesian(b) See Also ======== L{cartesian} @param other: set with which to take Cartesian product @type other: MathSet @return: Cartesian product of C{self} with C{other}. @rtype: C{MathSet} (explicit construction) """ cartesian = [(x, y) for x in self for y in other] return MathSet(cartesian) def __ior__(self, iterable): """Union with of MathSet with iterable. Example ======= >>> s = MathSet([1, 2] ) >>> s |= [3, 4] # much cleaner & familiar >>> print(s) set([1, 2, 3, 4]) U [] See Also ======== L{__or__} @param iterable: any mathematical set. 
@type iterable: iterable, elements not restricted to hashable @return: self | iterable @rtype: MathSet """ self.add_from(iterable) return self def __sub__(self, rm_items): s = MathSet(self) print('s = ' + str(s)) print(rm_items) for item in rm_items: if item in s: print('Removing...: ' + str(item)) s.remove(item) return s def __isub__(self, rm_items): """Delete multiple elements.""" for item in rm_items: if item in self: self.remove(item) return self def __eq__(self, other): if not isinstance(other, MathSet): raise TypeError( 'For now comparison only to another MathSet.\n' 'Got:\n\t' + str(other) + '\n of type: ' + str(type(other)) + ', instead.') same_lists = compare_lists(self._list, other._list) return (self._set == other._set) and same_lists def __contains__(self, item): if isinstance(item, Hashable): try: return item in self._set except: logger.error('UnHashable items within Hashable.') return item in self._list def __iter__(self): return iter(self._list + list(self._set)) def __len__(self): """Number of elements in set.""" return len(self._set) + len(self._list) def _filter_hashables(self, iterable): return filter(lambda x: isinstance(x, Hashable), iterable) def _filter_unhashables(self, iterable): return list(filter(lambda x: not isinstance(x, Hashable), iterable)) def _delete_all(self): self._set = set() self._list = list() def add(self, item): """Add element to mathematical set. Example ======= >>> s = MathSet() >>> s.add(1) set([1]) U [] See Also ======== L{add_from}, L{__ior__}, L{remove} @param item: the new set element @type item: anything, if hashable it is stored in a Python set, otherwise stored in a list. """ if isinstance(item, Hashable): try: self._set.add(item) return except TypeError: logger.error('UnHashable items within Hashable.') if item not in self._list: self._list.append(item) else: logger.warning('item already in MathSet.') def add_from(self, iterable): """Add multiple elements to mathematical set. 
Equivalent to |= Example ======= >>> s = MathSet() >>> s.add_from([1, 2, {3} ] ) is equivalent to: >>> s = MathSet() >>> s |= [1, 2, {3} ] See Also ======== L{add}, L{__ior__}, L{remove} @param iterable: new MathSet elements @type iterable: iterable containing (possibly not hashable) elements """ if not isinstance(iterable, Iterable): raise TypeError( 'Can only add elements to MathSet from Iterable.\n' 'Got:\n\t' + str(iterable) + '\n instead.') if isinstance(iterable, MathSet): self._set |= set(iterable._set) self._list = list(unique(self._list + iterable._list)) return # speed up if isinstance(iterable, set): self._set |= iterable return # filter to optimize storage try: self._set |= set(self._filter_hashables(iterable)) self._list = list(unique( self._list + self._filter_unhashables(iterable))) return except: # ...if contents of elements in iterable are mutable self._list = list(unique(self._list + list(iterable))) def remove(self, item): """Remove existing element from mathematical set. Example ======= >>> p = MathSet([1, 2] ) >>> p.remove(1) >>> p set([2]) U [] See Also ======== L{add}, L{add_from}, L{__or__} @param item: An item already in the set. For adding items, see add. """ if item not in self: warnings.warn( 'Set element not in set S.\n' 'Maybe you targeted another element for removal ?') if isinstance(item, Hashable): try: self._set.remove(item) return except: logger.debug('item: ' + str(item) + ', contains unhashables.') self._list.remove(item) def pop(self): """Remove and return random MathSet element. Raises KeyError if MathSet is empty. 
""" if not self: raise KeyError('Nothing to pop: MathSet is empty.') if self._set and self._list: if randint(0, 1): return self._set.pop() else: return self._list.pop() elif self._set and not self._list: return self._set.pop() elif self._list and not self._set: return self._list.pop() else: raise Exception('Bug in empty MathSet: not self above' + 'should not reaching this point.') def intersection(self, iterable): """Return intersection with iterable. @param iterable: find common elements with C{self} @type iterable: C{Iterable} @return: intersection of C{self} with C{iterable} @rtype: C{MathSet} """ s = MathSet() for item in iterable: if item in self: s.add(item) return s def intersects(self, iterable): """Check intersection with iterable. Checks the existence of common elements with iterable. >>> s = MathSet() >>> s.add(1) >>> r = [1,2] >>> s.intersects(r) True @param iterable: with which to check intersection @type iterable: C{Iterable} @return: C{True} if C{self} has common element with C{iterable}. Otherwise C{False}. @rtype: C{bool} """ for item in iterable: if item in self: return True return False class SubSet(MathSet): """Subset of selected MathSet, or other Iterable. Prior to adding new elements, it checks that they are in its superset. Example ======= >>> superset = [1, 2] >>> s = SubSet(superset) >>> s |= [1, 2] >>> print(s) SubSet([[1, 2]]) >>> s.add(3) raises exception because 3 \\notin [1,2] See Also ======== L{MathSet}, L{PowerSet} """ def __init__(self, superset, iterable=None): """Define the superset of this set. 
@param superset: This SubSet checked vs C{superset} @type superset: Iterable @param iterable: elements to add to subset @type iterable: Iterable """ self._superset = superset super(SubSet, self).__init__([]) if not isinstance(superset, Container): raise TypeError('superset must be Iterable,\n' 'Got instead:\n\t' + str(superset)) def __repr__(self): return 'SubSet(' + pformat(list(self._set) + self._list) + ')' def _debug_repr(self): set_str = ', '.join([repr(i) for i in self._set]) return 'SubSet({' + set_str + '} +' + str(self._list) + ')' @property def superset(self): return self._superset def add(self, new_element): """Add state to subset. Extends MathSet.add with subset relation checking. Example ======= C{new_initial_state} should already be a state. First use states.add to include it in set of states, then states.add_initial. See Also ======== L{MathSet.add} """ if new_element not in self._superset: raise Exception( 'New element state \\notin superset.\n' 'Add it first to states using e.g. sys.states.add()\n' 'FYI: new element:\n\t' + str(new_element) + '\n' 'and superset:\n\t' + str(self._superset)) super(SubSet, self).add(new_element) def add_from(self, new_elements): """Add multiple new elements to subset. Extends MathSet.add_from with subset relation checking. Note ==== It would be sufficient to extend only .add provided MathSet.add_from called .add iteratively. However MathSet.add_from filters states, which is arguably more efficient. So both .add and .add_from need to be extended here. 
See Also ======== L{add}, L{__ior__} """ if not is_subset(new_elements, self._superset): raise Exception('All new_elements:\n\t' + str(new_elements) + '\nshould already be \\in ' + 'self.superset = ' + str(self._superset)) super(SubSet, self).add_from(new_elements) class CartesianProduct(object): """List of MathSets, with Cartesian semantics.""" def __init__(self): self.mathsets = [] def __contains__(self, element): # TODO check ordered if not isinstance(element, Iterable): raise TypeError( 'Argument element must be Iterable, otherwise cannot ' 'recover which item in it belongs to which set in the ' 'Cartesian product.') for idx, item in enumerate(element): if item not in self.mathsets[idx]: return False return True def __mul__(self, mathsets): """Multiply Cartesian products.""" if not isinstance(mathsets, list): raise TypeError('mathsets given must be a list of MathSet.') def add(self, mathset): self.mathsets += [mathset] def add_from(self, mathsets): self.mathsets += mathsets def remove(self, mathset): self.mathsets.remove(mathset) def remove_from(self, mathsets): for mathset in mathsets: self.remove(mathset) def unique(iterable): """Return unique elements. Note ==== Always returning a list for consistency was tempting, however this defeats the purpose of creating this function to achieve brevity elsewhere in the code. @return: iterable with duplicates removed, as C{set} if possible. @rtype: - If all items in C{iterable} are hashable, then returns C{set}. - If iterable contains unhashable item, then returns C{list} of unique elements. """ # hashable items ? try: unique_items = set(iterable) except: unique_items = [] for item in iterable: if item not in unique_items: unique_items.append(item) return unique_items def contains_multiple(iterable): """Does iterable contain any item multiple times ?""" return len(iterable) != len(unique(iterable)) def is_subset(small_iterable, big_iterable): """Comparison for handling list <= set, and lists with unhashable items. 
""" # asserts removed when compiling with optimization on... # it would have been elegant to use instead: # assert(isinstance(big_iterable, Iterable)) # since the error msg is succintly stated by the assert itself if not isinstance(big_iterable, (Iterable, Container)): raise TypeError('big_iterable must be either Iterable or Container, ' 'otherwise subset relation undefined.\n' 'Got:\n\t' + str(big_iterable) + '\ninstead.') if not isinstance(small_iterable, Iterable): raise TypeError('small_iterable must be Iterable, ' 'otherwise subset relation undefined.\n' 'Got:\n\t' + str(small_iterable) + '\ninstead.') # nxor if isinstance(small_iterable, str) != isinstance(big_iterable, str): raise TypeError('Either both or none of small_iterable, ' 'big_iterable should be strings.\n' 'Otherwise subset relation between string ' 'and non-string may introduce bugs.\nGot:\n\t' + str(small_iterable) + ',\t' + str(big_iterable) + '\ninstead.') try: # first, avoid object duplication if not isinstance(small_iterable, set): small_iterable = set(small_iterable) if not isinstance(big_iterable, set): big_iterable = set(big_iterable) return small_iterable <= big_iterable except TypeError: # not all items hashable... try: # list to avoid: unhashable \in set ? => error if not isinstance(big_iterable, list): # avoid object duplication big_iterable = list(big_iterable) except: logger.error('Could not convert big_iterable to list.') for item in small_iterable: if item not in big_iterable: return False return True except: raise Exception('Failed to compare iterables.') def powerset(iterable): """powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3) From http://docs.python.org/2/library/itertools.html, also in https://pypi.python.org/pypi/more-itertools """ s = list(iterable) return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1)) class PowerSet(object): """Efficiently store power set of a mathematical set. 
Set here isn't necessarily a Python set, i.e., it may comprise of unhashable elements. Example ======= Specify the mathematical set S underlying the PowerSet. >>> S = [[1, 2], '3', {'a':1}, 1] >>> p = PowerSet(S) >>> q = PowerSet() >>> q.math_set = S Add new element to underlying set S. >>> p.math_set.add({3: 'a'} ) Add multiple new elements to underlying set S. >>> p.math_set.add_from({3, 'a'} ) >>> p.math_set |= [1,2] Remove existing element from set S. >>> p.remove(1) See Also ======== L{MathSet}, L{SubSet}, L{is_subset} @param iterable: mathematical set S of elements, on which this 2^S defined. @type iterable: iterable container """ def __init__(self, iterable=None): """Create new PowerSet over elements contained in S = C{iterable}. This powerset is 2^S. @param iterable: contains elements of set S underlying the PowerSet. @type iterable: iterable of elements which can be hashable or not. """ if iterable is None: iterable = [] self.math_set = MathSet(iterable) def __get__(self, instance, value): return self() def __repr__(self): return 'PowerSet(' + str(self.math_set) + ' )' def __contains__(self, item): """Is item \\in 2^iterable = this powerset(iterable).""" if not isinstance(item, Iterable): raise Exception('Not iterable:\n\t' + str(item) + ',\n' 'this is a powerset, so it contains (math) sets.') return is_subset(item, self.math_set) def __iter__(self): return powerset(self.math_set) def __len__(self): return 2 ** len(self.math_set) def __add__(self, other): if not isinstance(other, PowerSet): raise TypeError('Addition defined only between PowerSets.\n' 'Got instead:\n\t other = ' + str(other)) list1 = self.math_set list2 = other.math_set union = list1 | list2 return PowerSet(union) def __eq__(self, other): if not isinstance(other, PowerSet): raise TypeError('Can only compare to another PowerSet.') return other.math_set == self.math_set def __setattr__(self, name, value): if name == 'math_set' and not isinstance(value, MathSet): msg = ( 'PowerSet.math_set 
must be of class MathSet.\n' 'Got instead:\n\t' + str(value) + '\nof class:\nt\t' + str(type(value))) raise Exception(msg) object.__setattr__(self, name, value) class TypedDict(dict): """dict subclass where values can be constrained by key. For each key, a domain can optionally be defined, which restricts the admissible values that can be paired with that key. Example ======= >>> d = TypedDict() >>> allowed_values = {'name': {'Maria', 'John'}, 'age': range(122)} >>> default_values = {'name': 'Maria', 'age': 30} >>> d.set_types(allowed_types) >>> d.update(default_values) """ # credits for debugging this go here: # http://stackoverflow.com/questions/2060972/ def __init__(self, *args, **kwargs): self.update(*args, **kwargs) self.allowed_values = dict() def __setitem__(self, i, y): """Raise ValueError if value y not allowed for key i.""" valid_y = True if hasattr(self, 'allowed_values') and i in self.allowed_values: valid_y = False if self.allowed_values[i] is None: valid_y = True else: try: if y in self.allowed_values[i]: valid_y = True except: valid_y = False if not valid_y: msg = ( 'key: ' + str(i) + ', cannot be' ' assigned value: ' + str(y) + '\n' 'Admissible values are:\n\t' + str(self.allowed_values[i])) raise ValueError(msg) super(TypedDict, self).__setitem__(i, y) def __str__(self): return 'TypedDict(' + dict.__str__(self) + ')' def update(self, *args, **kwargs): if args: if len(args) > 1: raise TypeError("update expected at most 1 arguments, " "got %d" % len(args)) other = dict(args[0]) for key in other: self[key] = other[key] for key in kwargs: self[key] = kwargs[key] def setdefault(self, key, value=None): if key not in self: self[key] = value return self[key] def set_types(self, allowed_values): """Restrict values the key can be paired with. @param allowed_values: dict of the form:: {key : values} C{values} must implement C{__contains__} to enable checking validity of values. If C{values} is C{None}, then any value is allowed. 
""" self.allowed_values = allowed_values def is_consistent(self): """Check if typed keys have consistent values. Use case: changing the object that allowed_values points to can invalidate the assigned values. @rtype: bool """ for k, v in self: if k in self.allowed_values: if v in self.allowed_values[k]: return False return True
# -*- coding: utf-8 -*-
# Tests for graphistry's GremlinMixin / CosmosMixin: result-set parsing
# into node/edge DataFrames and node/edge -> gremlin query generation.

from typing import Iterable
import os, numpy as np, pandas as pd, pyarrow as pa, pytest, queue
from common import NoAuthTestCase
from concurrent.futures import Future
from mock import patch
from gremlin_python.driver.resultset import ResultSet
from gremlin_python.structure.graph import Vertex, Edge, Path

from graphistry.gremlin import CosmosMixin, GremlinMixin, DROP_QUERY, nodes_to_queries, edges_to_queries
from graphistry.plotter import PlotterBase


# ### Helpers ### #

def fake_client(query_to_result = {}):
    # Stub gremlin client: submitAsync(query) returns an object whose
    # .result() looks the query up in the supplied mapping.
    # NOTE(review): mutable default argument is shared across calls;
    # harmless here since it is only read, never mutated.

    class FakeCallback:

        def __init__(self, query: str):
            self.query = query

        def result(self):
            return query_to_result[self.query]

    class FakeClient:

        def submitAsync(self, query: str):
            cb = FakeCallback(query)
            return cb

    return FakeClient()


class TG(GremlinMixin):
    # Minimal harness exposing only GremlinMixin.
    # NOTE(review): GremlinMixin.__init__ runs twice (via super() and the
    # explicit call) -- presumably intentional to force kwargs through.
    def __init__(self, *args, **kwargs):
        super().__init__()
        GremlinMixin.__init__(self, *args, **kwargs)


class TGFull(GremlinMixin, PlotterBase):
    # Harness combining GremlinMixin with PlotterBase (full plotting API).
    def __init__(self, *args, **kwargs):
        print('TGFull init')
        super(TGFull, self).__init__(*args, **kwargs)
        PlotterBase.__init__(self, *args, **kwargs)
        super(GremlinMixin, self).__init__(*args, **kwargs)


class CFull(CosmosMixin, GremlinMixin, PlotterBase):
    # Harness adding CosmosMixin; each base is initialized explicitly
    # instead of relying on cooperative super().
    def __init__(self, *args, **kwargs):
        print('CFull init')
        #super(CFull, self).__init__(*args, **kwargs)
        PlotterBase.__init__(self, *args, **kwargs)
        GremlinMixin.__init__(self, *args, **kwargs)
        CosmosMixin.__init__(self, *args, **kwargs)


def make_resultset(items = []) -> Iterable:
    # Build a gremlin ResultSet backed by a pre-filled queue; the attached
    # already-resolved Future marks the stream as done so iteration ends.
    q = queue.Queue()
    for item in items:
        q.put(item)
    f = Future()
    f.set_result([])
    rs = ResultSet(q, 'x')
    rs.done = f
    return rs
    # [x for x in rs]


# ### Gremlin ### #

class TestGremlinMixin(NoAuthTestCase):

    def test_connect_default_off(self):
        # without a client, connect() must fail loudly
        tg = TG()
        with self.assertRaises(ValueError):
            tg.connect()

    def test_drop(self):
        tg = TG(gremlin_client=fake_client({DROP_QUERY: 1}))
        assert tg.drop_graph() is tg

    def test_run_none(self):
        tg = TG(gremlin_client=fake_client({}))
        assert len([x for x in tg.gremlin_run([])]) == 0

    def test_run_one(self):
        tg = TG(gremlin_client=fake_client({'a': 'b'}))
        g = tg.gremlin_run(['a'])
        assert next(g) == 'b'
        # generator must be exhausted after the single query
        for rest in g:
            raise ValueError('Unexpected additional elements')

    def test_run_mult(self):
        tg = TG(gremlin_client=fake_client({'a': 'b', 'c': 'd'}))
        g = tg.gremlin_run(['a', 'c'])
        assert next(g) == 'b'
        assert next(g) == 'd'
        for rest in g:
            raise ValueError('Unexpected additional elements')

    def test_resultset_to_g_empty(self):
        rs = make_resultset([])
        tg = TGFull()
        g = tg.resultset_to_g(rs)
        assert g._nodes is None or len(g._nodes) == 0
        assert g._edges is None or len(g._edges) == 0

    def test_resultset_to_g_empty2(self):
        # empty pages inside the result set are also a no-op
        rs = make_resultset([[], []])
        tg = TGFull()
        g = tg.resultset_to_g(rs)
        assert g._nodes is None or len(g._nodes) == 0
        assert g._edges is None or len(g._edges) == 0

    def test_resultset_to_g_single_edge(self):
        rs = make_resultset([{'type': 'edge', 'inV': 'a', 'outV': 'b'}])
        tg = TGFull()
        g = tg.resultset_to_g(rs)
        assert g._nodes is None
        assert len(g._edges) == 1
        assert g._source == 'src'
        assert g._destination == 'dst'

    def test_resultset_to_g_edges_attributed(self):
        # reserved property names (inV/outV/src/dst/label) must not
        # overwrite the top-level bindings
        edges = [
            {'type': 'edge', 'inV': 'a', 'outV': 'b', 'label': 'l1',
                'properties': {'x': 'y', 'f': 'g',
                'inV': 'ignoreme', 'src': 'ignoreme', 'label': 'ignoreme'}},
            {'type': 'edge', 'inV': 'm', 'outV': 'n', 'label': 'l2',
                'properties': {'x': 'yy', 'f': 'gg',
                'outV': 'ignoreme', 'dst': 'ignoreme', 'label': 'ignoreme'}},
        ]
        rs = make_resultset(edges)
        tg = TGFull()
        g = tg.resultset_to_g(rs)
        assert g._nodes is None
        assert len(g._edges) == 2
        assert g._source == 'src'
        assert g._destination == 'dst'
        assert g._edges.to_dict(orient='records') == [
            {'src': 'a', 'dst': 'b', 'x': 'y', 'f': 'g', 'label': 'l1',
                'inV': 'ignoreme', 'outV': np.nan},
            {'src': 'm', 'dst': 'n', 'x': 'yy', 'f': 'gg', 'label': 'l2',
                'inV': np.nan, 'outV': 'ignoreme', }
        ]

    def test_resultset_to_g_single_node(self):
        rs = make_resultset([{'type': 'vertex', 'id': 'a', 'label': 'b'}])
        tg = TGFull()
        g = tg.resultset_to_g(rs)
        assert g._nodes.to_dict(orient='records') == [
            {'id': 'a', 'label': 'b'}
        ]
        assert g._edges.to_dict(orient='records') == []
        assert g._node == 'id'
        assert g._source == 'src'
        assert g._destination == 'dst'

    def test_resultset_to_g_multi_node_attributed(self):
        nodes = [
            {'type': 'vertex', 'id': 'a', 'label': 'b',
                'properties': {
                'a': 'b', 'c': 'd', 'id': 'ignoreme'}},
            {'type': 'vertex', 'id': 'b', 'label': 'bb',
                'properties': {
                'a': 'bb', 'c': 'dd', 'label': 'ignoreme'}}
        ]
        rs = make_resultset(nodes)
        tg = TGFull()
        g = tg.resultset_to_g(rs)
        assert g._nodes.to_dict(orient='records') == [
            {'id': 'a', 'label': 'b', 'a': 'b', 'c': 'd'},
            {'id': 'b', 'label': 'bb', 'a': 'bb', 'c': 'dd'}
        ]
        assert g._edges.to_dict(orient='records') == []
        assert g._node == 'id'
        assert g._source == 'src'
        assert g._destination == 'dst'

    def test_resultset_to_g_vertex_stucture(self):
        # gremlin_python structure objects (not dicts) are also supported
        rs = make_resultset([Vertex(id='a', label='b')])
        tg = TGFull()
        g = tg.resultset_to_g(rs)
        assert g._nodes.to_dict(orient='records') == [
            {'id': 'a', 'label': 'b'}
        ]
        assert g._edges.to_dict(orient='records') == []
        assert g._node == 'id'
        assert g._source == 'src'
        assert g._destination == 'dst'

    def test_resultset_to_g_edge_stucture(self):
        inV = Vertex(id='a', label='b')
        outV = Vertex(id='c', label='d')
        e = Edge(id='a', outV=outV, label='e', inV=inV)
        rs = make_resultset([e])
        tg = TGFull()
        g = tg.resultset_to_g(rs)
        assert g._edges.to_dict(orient='records') == [
            {'src': 'a', 'dst': 'c', 'id': 'a', 'label': 'e'}
        ]
        assert g._nodes.to_dict(orient='records') == [
            {'id': 'a', 'label': 'b'},
            {'id': 'c', 'label': 'd'}
        ]

    def test_resultset_to_g_edge_stucture_dedup(self):
        # self-loop: the shared endpoint must appear only once in nodes
        inV = Vertex(id='a', label='b')
        outV = inV
        e = Edge(id='a', outV=outV, label='e', inV=inV)
        rs = make_resultset([e])
        tg = TGFull()
        g = tg.resultset_to_g(rs)
        assert g._edges.to_dict(orient='records') == [
            {'src': 'a', 'dst': 'a', 'id': 'a', 'label': 'e'}
        ]
        assert g._nodes.to_dict(orient='records') == [
            {'id': 'a', 'label': 'b'}
        ]

    def test_gremlin_none(self):
        tg = TGFull(gremlin_client=fake_client())
        g = tg.gremlin([])
        assert g._edges.to_dict(orient='records') == []

    def test_gremlin_one_edge(self):
        tg = TGFull(gremlin_client=fake_client({'g.E()': [
            [
                {'type': 'edge', 'inV': 'a', 'outV': 'b',
                    'properties': {'x': 'y', 'f': 'g'}}
            ]
        ]}))
        g = tg.gremlin(['g.E()'])
        assert g._nodes is None
        assert g._edges.to_dict(orient='records') == [
            {'src': 'a', 'dst': 'b', 'x': 'y', 'f': 'g'}
        ]

    def test_nodes_to_queries_mt(self):
        df = pd.DataFrame({'n': [], 'v1': []})
        g = PlotterBase()
        assert len([
            x for x in nodes_to_queries(g.nodes(df, 'n'), untyped=True)]) == 0

    def test_nodes_to_queries_single_untyped(self):
        df = pd.DataFrame({'n': ['i'], 'v1': [2]})
        g = PlotterBase()
        assert [
            x for x in nodes_to_queries(g.nodes(df, 'n'), untyped=True)][0] == "g.addV().property('n', 'i').property('v1', '2')"

    def test_nodes_to_queries_single_typed(self):
        df = pd.DataFrame({'n': ['i'], 'v1': [2]})
        g = PlotterBase()
        assert [
            x for x in nodes_to_queries(g.nodes(df, 'n'), type_col='n')][0] == "g.addV('i').property('v1', '2')"

    def test_nodes_to_queries_single_typed_inferred_type(self):
        # 'type' column is auto-detected as the vertex label
        df = pd.DataFrame({'type': ['i'], 'v1': [2]})
        g = PlotterBase()
        assert [
            x for x in nodes_to_queries(g.nodes(df, 'n'))][0] == "g.addV('i').property('v1', '2')"

    def test_nodes_to_queries_single_typed_inferred_category(self):
        # 'category' column is also auto-detected as the vertex label
        df = pd.DataFrame({'category': ['i'], 'v1': [2]})
        g = PlotterBase()
        assert [
            x for x in nodes_to_queries(g.nodes(df, 'n'))][0] == "g.addV('i').property('v1', '2')"

    def test_nodes_to_queries_multi(self):
        df = pd.DataFrame({'n': ['i', 'i2'], 'v1': [2, 3]})
        g = PlotterBase()
        assert len([
            x for x in nodes_to_queries(g.nodes(df, 'n'), untyped=True)]) == 2

    def test_edge_to_queries_mt(self):
        df = pd.DataFrame({'s': [], 'd': []})
        g = PlotterBase()
        assert len([
            x for x in edges_to_queries(g.edges(df, 's', 'd'), untyped=True)]) == 0

    def test_edge_to_queries_single_untyped(self):
        df = pd.DataFrame({'s': ['a'], 'd': ['b']})
        g = PlotterBase()
        assert [
            x for x in edges_to_queries(g.edges(df, 's', 'd'), untyped=True)][0] == "g.v('a').addE().to(g.v('b'))"

    def test_edge_to_queries_single_untyped_attributed(self):
        df = pd.DataFrame({'s': ['a'], 'd': ['b'], 'v1': [2]})
        g = PlotterBase()
        assert [
            x for x in edges_to_queries(g.edges(df, 's', 'd'), untyped=True)][0] == "g.v('a').addE().to(g.v('b')).property('v1', '2')"

    def test_edge_to_queries_single_typed_attributed(self):
        df = pd.DataFrame({'s': ['a'], 'd': ['b'], 'v1': [2], 't': ['x']})
        g = PlotterBase()
        assert [
            x for x in edges_to_queries(g.edges(df, 's', 'd'), type_col='t')][0] == "g.v('a').addE('x').to(g.v('b')).property('v1', '2')"

    def test_edge_to_queries_single_typed_inferred_type(self):
        df = pd.DataFrame({'s': ['a'], 'd': ['b'], 'v1': [2], 'type': ['x']})
        g = PlotterBase()
        assert [
            x for x in edges_to_queries(g.edges(df, 's', 'd'))][0] == "g.v('a').addE('x').to(g.v('b')).property('v1', '2')"

    def test_edge_to_queries_single_typed_inferred_edgeType(self):
        df = pd.DataFrame({'s': ['a'], 'd': ['b'], 'v1': [2], 'edgeType': ['x']})
        g = PlotterBase()
        assert [
            x for x in edges_to_queries(g.edges(df, 's', 'd'))][0] == "g.v('a').addE('x').to(g.v('b')).property('v1', '2')"

    def test_edge_to_queries_single_typed_inferred_category(self):
        df = pd.DataFrame({'s': ['a'], 'd': ['b'], 'v1': [2], 'category': ['x']})
        g = PlotterBase()
        assert [
            x for x in edges_to_queries(g.edges(df, 's', 'd'))][0] == "g.v('a').addE('x').to(g.v('b')).property('v1', '2')"


class TestCosmosMixin(NoAuthTestCase):

    def test_cosmos_init(self):
        cg = CFull(gremlin_client=fake_client({'g.E()': [
            [
                {'type': 'edge', 'inV': 'a', 'outV': 'b',
                    'properties': {'x': 'y', 'f': 'g'}}
            ]
        ]}))
        g = cg.gremlin(['g.E()'])
        assert g._nodes is None
        assert g._edges.to_dict(orient='records') == [
            {'src': 'a', 'dst': 'b', 'x': 'y', 'f': 'g'}
        ]
""" Soft Voting/Majority Rule classifier and Voting regressor. This module contains: - A Soft Voting/Majority Rule classifier for classification estimators. - A Voting regressor for regression estimators. """ # Authors: Sebastian Raschka <se.raschka@gmail.com>, # Gilles Louppe <g.louppe@gmail.com>, # Ramil Nugmanov <stsouko@live.ru> # Mohamed Ali Jamaoui <m.ali.jamaoui@gmail.com> # # License: BSD 3 clause from abc import abstractmethod import numbers import numpy as np from joblib import Parallel from ..base import ClassifierMixin from ..base import RegressorMixin from ..base import TransformerMixin from ..base import clone from ._base import _fit_single_estimator from ._base import _BaseHeterogeneousEnsemble from ..preprocessing import LabelEncoder from ..utils import Bunch from ..utils import check_scalar from ..utils.metaestimators import available_if from ..utils.validation import check_is_fitted from ..utils.multiclass import check_classification_targets from ..utils.validation import column_or_1d from ..exceptions import NotFittedError from ..utils._estimator_html_repr import _VisualBlock from ..utils.fixes import delayed class _BaseVoting(TransformerMixin, _BaseHeterogeneousEnsemble): """Base class for voting. Warning: This class should not be used directly. Use derived classes instead. 
""" def _log_message(self, name, idx, total): if not self.verbose: return None return f"({idx} of {total}) Processing {name}" @property def _weights_not_none(self): """Get the weights of not `None` estimators.""" if self.weights is None: return None return [w for est, w in zip(self.estimators, self.weights) if est[1] != "drop"] def _predict(self, X): """Collect results from clf.predict calls.""" return np.asarray([est.predict(X) for est in self.estimators_]).T @abstractmethod def fit(self, X, y, sample_weight=None): """Get common fit operations.""" names, clfs = self._validate_estimators() check_scalar( self.verbose, name="verbose", target_type=(numbers.Integral, np.bool_), min_val=0, ) if self.weights is not None and len(self.weights) != len(self.estimators): raise ValueError( "Number of `estimators` and weights must be equal; got" f" {len(self.weights)} weights, {len(self.estimators)} estimators" ) self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_fit_single_estimator)( clone(clf), X, y, sample_weight=sample_weight, message_clsname="Voting", message=self._log_message(names[idx], idx + 1, len(clfs)), ) for idx, clf in enumerate(clfs) if clf != "drop" ) self.named_estimators_ = Bunch() # Uses 'drop' as placeholder for dropped estimators est_iter = iter(self.estimators_) for name, est in self.estimators: current_est = est if est == "drop" else next(est_iter) self.named_estimators_[name] = current_est if hasattr(current_est, "feature_names_in_"): self.feature_names_in_ = current_est.feature_names_in_ return self def fit_transform(self, X, y=None, **fit_params): """Return class labels or probabilities for each estimator. Return predictions for X for each estimator. Parameters ---------- X : {array-like, sparse matrix, dataframe} of shape \ (n_samples, n_features) Input samples. y : ndarray of shape (n_samples,), default=None Target values (None for unsupervised transformations). **fit_params : dict Additional fit parameters. 
Returns ------- X_new : ndarray array of shape (n_samples, n_features_new) Transformed array. """ return super().fit_transform(X, y, **fit_params) @property def n_features_in_(self): """Number of features seen during :term:`fit`.""" # For consistency with other estimators we raise a AttributeError so # that hasattr() fails if the estimator isn't fitted. try: check_is_fitted(self) except NotFittedError as nfe: raise AttributeError( "{} object has no n_features_in_ attribute.".format( self.__class__.__name__ ) ) from nfe return self.estimators_[0].n_features_in_ def _sk_visual_block_(self): names, estimators = zip(*self.estimators) return _VisualBlock("parallel", estimators, names=names) def _more_tags(self): return {"preserves_dtype": []} class VotingClassifier(ClassifierMixin, _BaseVoting): """Soft Voting/Majority Rule classifier for unfitted estimators. Read more in the :ref:`User Guide <voting_classifier>`. .. versionadded:: 0.17 Parameters ---------- estimators : list of (str, estimator) tuples Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones of those original estimators that will be stored in the class attribute ``self.estimators_``. An estimator can be set to ``'drop'`` using :meth:`set_params`. .. versionchanged:: 0.21 ``'drop'`` is accepted. Using None was deprecated in 0.22 and support was removed in 0.24. voting : {'hard', 'soft'}, default='hard' If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like of shape (n_classifiers,), default=None Sequence of weights (`float` or `int`) to weight the occurrences of predicted class labels (`hard` voting) or class probabilities before averaging (`soft` voting). Uses uniform weights if `None`. n_jobs : int, default=None The number of jobs to run in parallel for ``fit``. 
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. .. versionadded:: 0.18 flatten_transform : bool, default=True Affects shape of transform output only when voting='soft' If voting='soft' and flatten_transform=True, transform method returns matrix with shape (n_samples, n_classifiers * n_classes). If flatten_transform=False, it returns (n_classifiers, n_samples, n_classes). verbose : bool, default=False If True, the time elapsed while fitting will be printed as it is completed. .. versionadded:: 0.23 Attributes ---------- estimators_ : list of classifiers The collection of fitted sub-estimators as defined in ``estimators`` that are not 'drop'. named_estimators_ : :class:`~sklearn.utils.Bunch` Attribute to access any fitted sub-estimators by name. .. versionadded:: 0.20 le_ : :class:`~sklearn.preprocessing.LabelEncoder` Transformer used to encode the labels during fit and decode during prediction. classes_ : ndarray of shape (n_classes,) The classes labels. n_features_in_ : int Number of features seen during :term:`fit`. Only defined if the underlying classifier exposes such an attribute when fit. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Only defined if the underlying estimators expose such an attribute when fit. .. versionadded:: 1.0 See Also -------- VotingRegressor : Prediction voting regressor. 
Examples -------- >>> import numpy as np >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier >>> clf1 = LogisticRegression(multi_class='multinomial', random_state=1) >>> clf2 = RandomForestClassifier(n_estimators=50, random_state=1) >>> clf3 = GaussianNB() >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> eclf1 = VotingClassifier(estimators=[ ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard') >>> eclf1 = eclf1.fit(X, y) >>> print(eclf1.predict(X)) [1 1 1 2 2 2] >>> np.array_equal(eclf1.named_estimators_.lr.predict(X), ... eclf1.named_estimators_['lr'].predict(X)) True >>> eclf2 = VotingClassifier(estimators=[ ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], ... voting='soft') >>> eclf2 = eclf2.fit(X, y) >>> print(eclf2.predict(X)) [1 1 1 2 2 2] To drop an estimator, :meth:`set_params` can be used to remove it. Here we dropped one of the estimators, resulting in 2 fitted estimators: >>> eclf2 = eclf2.set_params(lr='drop') >>> eclf2 = eclf2.fit(X, y) >>> len(eclf2.estimators_) 2 Setting `flatten_transform=True` with `voting='soft'` flattens output shape of `transform`: >>> eclf3 = VotingClassifier(estimators=[ ... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], ... voting='soft', weights=[2,1,1], ... flatten_transform=True) >>> eclf3 = eclf3.fit(X, y) >>> print(eclf3.predict(X)) [1 1 1 2 2 2] >>> print(eclf3.transform(X).shape) (6, 6) """ def __init__( self, estimators, *, voting="hard", weights=None, n_jobs=None, flatten_transform=True, verbose=False, ): super().__init__(estimators=estimators) self.voting = voting self.weights = weights self.n_jobs = n_jobs self.flatten_transform = flatten_transform self.verbose = verbose def fit(self, X, y, sample_weight=None): """Fit the estimators. 
Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Note that this is supported only if all underlying estimators support sample weights. .. versionadded:: 0.18 Returns ------- self : object Returns the instance itself. """ check_classification_targets(y) if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1: raise NotImplementedError( "Multilabel and multi-output classification is not supported." ) check_scalar( self.flatten_transform, name="flatten_transform", target_type=(numbers.Integral, np.bool_), ) if self.voting not in ("soft", "hard"): raise ValueError( f"Voting must be 'soft' or 'hard'; got (voting={self.voting!r})" ) self.le_ = LabelEncoder().fit(y) self.classes_ = self.le_.classes_ transformed_y = self.le_.transform(y) return super().fit(X, transformed_y, sample_weight) def predict(self, X): """Predict class labels for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Returns ------- maj : array-like of shape (n_samples,) Predicted class labels. 
""" check_is_fitted(self) if self.voting == "soft": maj = np.argmax(self.predict_proba(X), axis=1) else: # 'hard' voting predictions = self._predict(X) maj = np.apply_along_axis( lambda x: np.argmax(np.bincount(x, weights=self._weights_not_none)), axis=1, arr=predictions, ) maj = self.le_.inverse_transform(maj) return maj def _collect_probas(self, X): """Collect results from clf.predict calls.""" return np.asarray([clf.predict_proba(X) for clf in self.estimators_]) def _check_voting(self): if self.voting == "hard": raise AttributeError( f"predict_proba is not available when voting={repr(self.voting)}" ) return True @available_if(_check_voting) def predict_proba(self, X): """Compute probabilities of possible outcomes for samples in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Returns ------- avg : array-like of shape (n_samples, n_classes) Weighted average probability for each class per sample. """ check_is_fitted(self) avg = np.average( self._collect_probas(X), axis=0, weights=self._weights_not_none ) return avg def transform(self, X): """Return class labels or probabilities for X for each estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- probabilities_or_labels If `voting='soft'` and `flatten_transform=True`: returns ndarray of shape (n_classifiers, n_samples * n_classes), being class probabilities calculated by each classifier. If `voting='soft' and `flatten_transform=False`: ndarray of shape (n_classifiers, n_samples, n_classes) If `voting='hard'`: ndarray of shape (n_samples, n_classifiers), being class labels predicted by each classifier. 
""" check_is_fitted(self) if self.voting == "soft": probas = self._collect_probas(X) if not self.flatten_transform: return probas return np.hstack(probas) else: return self._predict(X) class VotingRegressor(RegressorMixin, _BaseVoting): """Prediction voting regressor for unfitted estimators. A voting regressor is an ensemble meta-estimator that fits several base regressors, each on the whole dataset. Then it averages the individual predictions to form a final prediction. Read more in the :ref:`User Guide <voting_regressor>`. .. versionadded:: 0.21 Parameters ---------- estimators : list of (str, estimator) tuples Invoking the ``fit`` method on the ``VotingRegressor`` will fit clones of those original estimators that will be stored in the class attribute ``self.estimators_``. An estimator can be set to ``'drop'`` using :meth:`set_params`. .. versionchanged:: 0.21 ``'drop'`` is accepted. Using None was deprecated in 0.22 and support was removed in 0.24. weights : array-like of shape (n_regressors,), default=None Sequence of weights (`float` or `int`) to weight the occurrences of predicted values before averaging. Uses uniform weights if `None`. n_jobs : int, default=None The number of jobs to run in parallel for ``fit``. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. verbose : bool, default=False If True, the time elapsed while fitting will be printed as it is completed. .. versionadded:: 0.23 Attributes ---------- estimators_ : list of regressors The collection of fitted sub-estimators as defined in ``estimators`` that are not 'drop'. named_estimators_ : :class:`~sklearn.utils.Bunch` Attribute to access any fitted sub-estimators by name. .. versionadded:: 0.20 n_features_in_ : int Number of features seen during :term:`fit`. Only defined if the underlying regressor exposes such an attribute when fit. .. 
versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Only defined if the underlying estimators expose such an attribute when fit. .. versionadded:: 1.0 See Also -------- VotingClassifier : Soft Voting/Majority Rule classifier. Examples -------- >>> import numpy as np >>> from sklearn.linear_model import LinearRegression >>> from sklearn.ensemble import RandomForestRegressor >>> from sklearn.ensemble import VotingRegressor >>> from sklearn.neighbors import KNeighborsRegressor >>> r1 = LinearRegression() >>> r2 = RandomForestRegressor(n_estimators=10, random_state=1) >>> r3 = KNeighborsRegressor() >>> X = np.array([[1, 1], [2, 4], [3, 9], [4, 16], [5, 25], [6, 36]]) >>> y = np.array([2, 6, 12, 20, 30, 42]) >>> er = VotingRegressor([('lr', r1), ('rf', r2), ('r3', r3)]) >>> print(er.fit(X, y).predict(X)) [ 6.8... 8.4... 12.5... 17.8... 26... 34...] In the following example, we drop the `'lr'` estimator with :meth:`~VotingRegressor.set_params` and fit the remaining two estimators: >>> er = er.set_params(lr='drop') >>> er = er.fit(X, y) >>> len(er.estimators_) 2 """ def __init__(self, estimators, *, weights=None, n_jobs=None, verbose=False): super().__init__(estimators=estimators) self.weights = weights self.n_jobs = n_jobs self.verbose = verbose def fit(self, X, y, sample_weight=None): """Fit the estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Note that this is supported only if all underlying estimators support sample weights. Returns ------- self : object Fitted estimator. 
""" y = column_or_1d(y, warn=True) return super().fit(X, y, sample_weight) def predict(self, X): """Predict regression target for X. The predicted regression target of an input sample is computed as the mean predicted regression targets of the estimators in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Returns ------- y : ndarray of shape (n_samples,) The predicted values. """ check_is_fitted(self) return np.average(self._predict(X), axis=1, weights=self._weights_not_none) def transform(self, X): """Return predictions for X for each estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Returns ------- predictions : ndarray of shape (n_samples, n_classifiers) Values predicted by each regressor. """ check_is_fitted(self) return self._predict(X)
#!/usr/bin/python
# -*- coding: utf-8 -*-

import platform

import gtk

from interfaces.signalizable import Signalizable
from objects.tag import Tag
from objects import HORIZONTAL, VERTICAL

# NOTE(review): gtk.threads_init() is skipped on Windows — presumably the
# Windows GTK build used here did not support it; confirm against the
# original project setup.
if platform.system() != 'Windows':
    gtk.threads_init()

import cairo
import pango
import pangocairo


class Ruler(gtk.Viewport, Signalizable):
    """This class represents a non-orientated ruler interface"""

    def __init__(self, orientation=VERTICAL):
        gtk.Viewport.__init__(self)
        Signalizable.__init__(self)
        self.orientation = orientation
        # Last known pointer position along each axis (widget coordinates,
        # corrected by offset in motion()).
        self.x = 0
        self.y = 0
        self.offset = 0
        self.tags = list()
        self.zoom = 1.0
        # When True, expose() draws the pointer-position marker and label.
        self.show_position = True
        self.layout = gtk.Layout()
        self.add(self.layout)
        # Fixed thickness of the ruler strip, in pixels.
        size = 25
        if self.orientation == HORIZONTAL:
            self.set_size_request(-1, size)
        elif self.orientation == VERTICAL:
            self.set_size_request(size, -1)
        self.add_events(gtk.gdk.POINTER_MOTION_MASK)
        self.layout.add_events(gtk.gdk.EXPOSURE_MASK)
        self.add_events(gtk.gdk.BUTTON_RELEASE_MASK)
        # The extra False is passed through as the `external` argument of
        # motion().
        self.connect("motion-notify-event", self.motion, False)
        self.connect("button-release-event", self.release)
        self.connect("button-press-event", self.press)
        self.layout.connect("expose-event", self.expose)
        self.install_signal("append-tag")
        self.install_signal("move-tag")

    def motion(self, widget, event, external):
        """Track the pointer, drag any selected tag, and update the cursor.

        `external` scales the stored offset; it is False when the event comes
        from this widget's own connection (see __init__).
        """
        if self.orientation == HORIZONTAL:
            self.x = event.x - self.offset * external
            for tag in self.tags:
                if tag.selected and event.state & gtk.gdk.BUTTON1_MASK:
                    # Button held on a selected tag: drag it and notify.
                    tag.move(self.x)
                    self.emit("move-tag", tag)
                elif tag.at_position(self.x):
                    # Hovering a tag: show its cursor, hide the position line.
                    widget.window.set_cursor(gtk.gdk.Cursor(tag.get_cursor()))
                    self.show_position = False
                    break
            else:
                # No tag hit: restore the default cursor.
                widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
                self.show_position = True
        elif self.orientation == VERTICAL:
            self.y = event.y - self.offset * external
            for tag in self.tags:
                if tag.selected and event.state & gtk.gdk.BUTTON1_MASK:
                    tag.move(self.y)
                    self.emit("move-tag", tag)
                elif tag.at_position(self.y):
                    widget.window.set_cursor(gtk.gdk.Cursor(tag.get_cursor()))
                    self.show_position = False
                    break
            else:
                widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
                self.show_position = True
        self.queue_draw()
        return True

    def press(self, widget, event):
        """Select the first tag under the pressed position, if any."""
        position = 0
        if self.orientation == HORIZONTAL:
            position = event.x
        if self.orientation == VERTICAL:
            position = event.y
        for tag in self.tags:
            if tag.at_position(position):
                tag.selected = True
                break

    def release(self, widget, event):
        """Deselect tags; if none was selected, create a new tag here.

        Emits "append-tag" with the newly created tag.
        """
        select = False
        for tag in self.tags:
            if tag.selected:
                select = True
            tag.selected = False
        if not select:
            tag = Tag()
            tag.orientation = self.orientation
            if self.orientation == HORIZONTAL:
                tag.position = event.x
            elif self.orientation == VERTICAL:
                tag.position = event.y
            self.tags.append(tag)
            self.emit("append-tag", tag)
        return True

    def expose(self, widget, event):
        """Draw the ruler: tick lines, tags, and the pointer marker/label."""
        context = widget.bin_window.cairo_create()
        context.set_antialias(cairo.ANTIALIAS_NONE)
        width, height = widget.window.get_size()
        size = width if self.orientation == HORIZONTAL else height
        context.set_dash([])

        def draw_lines(context, position, margin, size, unit, zoom):
            # Tick marks every `unit` pixels, from `margin` to the ruler edge.
            while position <= size:
                if self.orientation == HORIZONTAL:
                    context.move_to(position * zoom, margin)
                    context.line_to(position * zoom, size)
                elif self.orientation == VERTICAL:
                    context.move_to(margin, position * zoom)
                    context.line_to(size, position * zoom)
                position += unit

        # Minor (every 10), medium (every 50) and major (every 100) ticks.
        context.set_line_width(1)
        draw_lines(context, 25, 18, size, 10, self.zoom)
        draw_lines(context, 25, 10, size, 50, self.zoom)
        context.stroke()
        context.set_line_width(2)
        draw_lines(context, 25, 8, size, 100, self.zoom)
        context.stroke()
        context.set_line_width(3)
        for tag in self.tags:
            tag.draw_tag(context)
        context.stroke()
        if self.show_position:
            # Blue hairline at the current pointer position.
            context.set_line_width(1)
            context.set_source_rgb(0.0, 0.0, 0.75)
            border = 2
            if self.orientation == HORIZONTAL and self.x:
                context.move_to(self.x, border)
                context.line_to(self.x, size)
            elif self.orientation == VERTICAL and self.y:
                context.move_to(border, self.y)
                context.line_to(size - border, self.y)
            context.stroke()
            # Numeric label of the pointer position, rendered with pango.
            context = pangocairo.CairoContext(context)
            layout = pangocairo.CairoContext.create_layout(context)
            fontname = 'Sans' if platform.system() == 'Windows' else 'Ubuntu'
            size = 8
            description = '%s %d' % (fontname, size)
            font = pango.FontDescription(description)
            layout.set_justify(True)
            layout.set_font_description(font)
            text = None
            if self.orientation == HORIZONTAL:
                context.move_to(self.x + 2, 0)
                text = str(int(self.x))
            elif self.orientation == VERTICAL:
                context.move_to(2, self.y)
                text = str(int(self.y))
            layout.set_text(text)
            context.set_antialias(cairo.ANTIALIAS_DEFAULT)
            context.set_source_rgb(0.0, 0.0, 0.0)
            context.show_layout(layout)
        return True


class HorizontalRuler(Ruler):
    """This class represents a horizontal ruler"""

    def __init__(self):
        Ruler.__init__(self, HORIZONTAL)


class VerticalRuler(Ruler):
    """This class represents a vertical ruler"""

    def __init__(self):
        # Relies on Ruler's default orientation being VERTICAL.
        Ruler.__init__(self)


if __name__ == '__main__':
    # Demo: one window per ruler orientation.
    horizontal_window = gtk.Window()
    horizontal_window.connect("delete-event", gtk.main_quit)
    horizontal_ruler = HorizontalRuler()
    horizontal_window.add(horizontal_ruler)
    horizontal_window.show_all()
    vertical_window = gtk.Window()
    vertical_window.connect("delete-event", gtk.main_quit)
    vertical_ruler = VerticalRuler()
    vertical_window.add(vertical_ruler)
    vertical_window.show_all()
    gtk.main()
import re
from contextlib import contextmanager

from parso.python.errors import ErrorFinder, ErrorFinderConfig
from parso.normalizer import Rule
from parso.python.tree import search_ancestor, Flow, Scope


_IMPORT_TYPES = ('import_name', 'import_from')
_SUITE_INTRODUCERS = ('classdef', 'funcdef', 'if_stmt', 'while_stmt',
                      'for_stmt', 'try_stmt', 'with_stmt')
_NON_STAR_TYPES = ('term', 'import_from', 'power')
_OPENING_BRACKETS = '(', '[', '{'
_CLOSING_BRACKETS = ')', ']', '}'
_FACTOR = '+', '-', '~'
_ALLOW_SPACE = '*', '+', '-', '**', '/', '//', '@'
_BITWISE_OPERATOR = '<<', '>>', '|', '&', '^'
_NEEDS_SPACE = ('=', '%', '->',
                '<', '>', '==', '>=', '<=', '<>', '!=',
                '+=', '-=', '*=', '@=', '/=', '%=', '&=', '|=', '^=',
                '<<=', '>>=', '**=', '//=')
_NEEDS_SPACE += _BITWISE_OPERATOR
_IMPLICIT_INDENTATION_TYPES = ('dictorsetmaker', 'argument')
_POSSIBLE_SLICE_PARENTS = ('subscript', 'subscriptlist', 'sliceop')


class IndentationTypes(object):
    # Sentinels that classify how a continuation line may be indented.
    VERTICAL_BRACKET = object()
    HANGING_BRACKET = object()
    BACKSLASH = object()
    SUITE = object()
    IMPLICIT = object()


class IndentationNode(object):
    """One entry of the indentation stack; base case is a suite."""
    type = IndentationTypes.SUITE

    def __init__(self, config, indentation, parent=None):
        self.bracket_indentation = self.indentation = indentation
        self.parent = parent

    def __repr__(self):
        return '<%s>' % self.__class__.__name__

    def get_latest_suite_node(self):
        """Walk up the stack to the nearest SUITE node (or None)."""
        n = self
        while n is not None:
            if n.type == IndentationTypes.SUITE:
                return n

            n = n.parent


class BracketNode(IndentationNode):
    """Indentation introduced by an opening bracket."""
    def __init__(self, config, leaf, parent, in_suite_introducer=False):
        self.leaf = leaf

        # Figure out here what the indentation is. For chained brackets
        # we can basically use the previous indentation.
        previous_leaf = leaf
        n = parent
        if n.type == IndentationTypes.IMPLICIT:
            n = n.parent
        while True:
            if hasattr(n, 'leaf') and previous_leaf.line != n.leaf.line:
                break

            previous_leaf = previous_leaf.get_previous_leaf()
            if not isinstance(n, BracketNode) or previous_leaf != n.leaf:
                break
            n = n.parent
        parent_indentation = n.indentation

        next_leaf = leaf.get_next_leaf()
        if '\n' in next_leaf.prefix:
            # This implies code like:
            # foobarbaz(
            #     a,
            #     b,
            # )
            self.bracket_indentation = parent_indentation \
                + config.closing_bracket_hanging_indentation
            self.indentation = parent_indentation + config.indentation
            self.type = IndentationTypes.HANGING_BRACKET
        else:
            # Implies code like:
            # foobarbaz(
            #            a,
            #            b,
            #            )
            expected_end_indent = leaf.end_pos[1]
            if '\t' in config.indentation:
                self.indentation = None
            else:
                self.indentation = ' ' * expected_end_indent
            self.bracket_indentation = self.indentation
            self.type = IndentationTypes.VERTICAL_BRACKET

        if in_suite_introducer and parent.type == IndentationTypes.SUITE \
                and self.indentation == parent_indentation + config.indentation:
            self.indentation += config.indentation
            # The closing bracket should have the same indentation.
            self.bracket_indentation = self.indentation
        self.parent = parent


class ImplicitNode(BracketNode):
    """
    Implicit indentation after keyword arguments, default arguments,
    annotations and dict values.
    """
    def __init__(self, config, leaf, parent):
        super(ImplicitNode, self).__init__(config, leaf, parent)
        self.type = IndentationTypes.IMPLICIT

        next_leaf = leaf.get_next_leaf()
        if leaf == ':' and '\n' not in next_leaf.prefix:
            self.indentation += ' '


class BackslashNode(IndentationNode):
    """Indentation introduced by a backslash line continuation."""
    type = IndentationTypes.BACKSLASH

    def __init__(self, config, parent_indentation, containing_leaf, spacing,
                 parent=None):
        expr_stmt = search_ancestor(containing_leaf, 'expr_stmt')
        if expr_stmt is not None:
            equals = expr_stmt.children[-2]

            if '\t' in config.indentation:
                # TODO unite with the code of BracketNode
                self.indentation = None
            else:
                # If the backslash follows the equals, use normal indentation
                # otherwise it should align with the equals.
                if equals.end_pos == spacing.start_pos:
                    self.indentation = parent_indentation + config.indentation
                else:
                    # +1 because there is a space.
                    self.indentation = ' ' * (equals.end_pos[1] + 1)
        else:
            self.indentation = parent_indentation + config.indentation
        self.bracket_indentation = self.indentation
        self.parent = parent


def _is_magic_name(name):
    # Dunder names like __author__ are allowed before imports.
    return name.value.startswith('__') and name.value.endswith('__')


class PEP8Normalizer(ErrorFinder):
    """Walks a parso tree and reports PEP 8 style issues."""
    def __init__(self, *args, **kwargs):
        super(PEP8Normalizer, self).__init__(*args, **kwargs)
        self._previous_part = None
        self._previous_leaf = None
        self._on_newline = True
        self._newline_count = 0
        self._wanted_newline_count = None
        self._max_new_lines_in_prefix = 0
        self._new_statement = True
        self._implicit_indentation_possible = False
        # The top of stack of the indentation nodes.
        self._indentation_tos = self._last_indentation_tos = \
            IndentationNode(self._config, indentation='')
        self._in_suite_introducer = False

        if ' ' in self._config.indentation:
            self._indentation_type = 'spaces'
            self._wrong_indentation_char = '\t'
        else:
            self._indentation_type = 'tabs'
            self._wrong_indentation_char = ' '

    @contextmanager
    def visit_node(self, node):
        with super(PEP8Normalizer, self).visit_node(node):
            with self._visit_node(node):
                yield

    @contextmanager
    def _visit_node(self, node):
        typ = node.type

        # FIX: was `typ in 'import_name'`, which tests substring containment
        # on the string instead of equality.
        if typ == 'import_name':
            names = node.get_defined_names()
            if len(names) > 1:
                # NOTE(review): only the first name is flagged
                # (`names[:1]`) — presumably one issue per statement is
                # intended; confirm against upstream behavior.
                for name in names[:1]:
                    self.add_issue(name, 401, 'Multiple imports on one line')
        elif typ == 'lambdef':
            expr_stmt = node.parent
            # Check if it's simply defining a single name, not something like
            # foo.bar or x[1], where using a lambda could make more sense.
            if expr_stmt.type == 'expr_stmt' and any(
                    n.type == 'name' for n in expr_stmt.children[:-2:2]):
                self.add_issue(node, 731, 'Do not assign a lambda expression, use a def')
        elif typ == 'try_stmt':
            for child in node.children:
                # Here we can simply check if it's an except, because otherwise
                # it would be an except_clause.
                if child.type == 'keyword' and child.value == 'except':
                    self.add_issue(child, 722, 'Do not use bare except, specify exception instead')
        elif typ == 'comparison':
            for child in node.children:
                if child.type not in ('atom_expr', 'power'):
                    continue
                if len(child.children) > 2:
                    continue
                trailer = child.children[1]
                atom = child.children[0]
                if trailer.type == 'trailer' and atom.type == 'name' \
                        and atom.value == 'type':
                    # FIX: message had an unbalanced quote ("use 'isinstance()").
                    self.add_issue(node, 721, "Do not compare types, use 'isinstance()'")
                    break
        elif typ == 'file_input':
            endmarker = node.children[-1]
            prev = endmarker.get_previous_leaf()
            prefix = endmarker.prefix
            if (not prefix.endswith('\n') and (
                    prefix or prev is None or prev.value != '\n')):
                self.add_issue(endmarker, 292, "No newline at end of file")

        if typ in _IMPORT_TYPES:
            simple_stmt = node.parent
            module = simple_stmt.parent
            #if module.type == 'simple_stmt':
            if module.type == 'file_input':
                index = module.children.index(simple_stmt)
                for child in module.children[:index]:
                    children = [child]
                    if child.type == 'simple_stmt':
                        # Remove the newline.
                        children = child.children[:-1]

                    found_docstring = False
                    for c in children:
                        if c.type == 'string' and not found_docstring:
                            continue
                        found_docstring = True

                        if c.type == 'expr_stmt' and \
                                all(_is_magic_name(n) for n in c.get_defined_names()):
                            continue

                        if c.type in _IMPORT_TYPES or isinstance(c, Flow):
                            continue

                        self.add_issue(node, 402, 'Module level import not at top of file')
                        break
                    else:
                        continue
                    break

        implicit_indentation_possible = typ in _IMPLICIT_INDENTATION_TYPES
        in_introducer = typ in _SUITE_INTRODUCERS
        if in_introducer:
            self._in_suite_introducer = True
        elif typ == 'suite':
            if self._indentation_tos.type == IndentationTypes.BACKSLASH:
                self._indentation_tos = self._indentation_tos.parent

            self._indentation_tos = IndentationNode(
                self._config,
                self._indentation_tos.indentation + self._config.indentation,
                parent=self._indentation_tos
            )
        elif implicit_indentation_possible:
            self._implicit_indentation_possible = True
        yield
        if typ == 'suite':
            assert self._indentation_tos.type == IndentationTypes.SUITE
            self._indentation_tos = self._indentation_tos.parent
            # If we dedent, no lines are needed anymore.
            self._wanted_newline_count = None
        elif implicit_indentation_possible:
            self._implicit_indentation_possible = False
            if self._indentation_tos.type == IndentationTypes.IMPLICIT:
                self._indentation_tos = self._indentation_tos.parent
        elif in_introducer:
            self._in_suite_introducer = False
            if typ in ('classdef', 'funcdef'):
                self._wanted_newline_count = self._get_wanted_blank_lines_count()

    def _check_tabs_spaces(self, spacing):
        """Report issue 101 when the wrong indentation character is used."""
        if self._wrong_indentation_char in spacing.value:
            self.add_issue(spacing, 101, 'Indentation contains ' + self._indentation_type)
            return True
        return False

    def _get_wanted_blank_lines_count(self):
        # Two blank lines at module level, one inside a suite.
        suite_node = self._indentation_tos.get_latest_suite_node()
        return int(suite_node.parent is None) + 1

    def _reset_newlines(self, spacing, leaf, is_comment=False):
        self._max_new_lines_in_prefix = \
            max(self._max_new_lines_in_prefix, self._newline_count)

        wanted = self._wanted_newline_count
        if wanted is not None:
            # Need to substract one
            blank_lines = self._newline_count - 1
            if wanted > blank_lines and leaf.type != 'endmarker':
                # In case of a comment we don't need to add the issue, yet.
                if not is_comment:
                    # TODO end_pos wrong.
                    code = 302 if wanted == 2 else 301
                    message = "expected %s blank line, found %s" \
                        % (wanted, blank_lines)
                    self.add_issue(spacing, code, message)
                    self._wanted_newline_count = None
            else:
                self._wanted_newline_count = None

        if not is_comment:
            wanted = self._get_wanted_blank_lines_count()
            actual = self._max_new_lines_in_prefix - 1

            val = leaf.value
            needs_lines = (
                val == '@' and leaf.parent.type == 'decorator'
                or (
                    val == 'class'
                    or val == 'async' and leaf.get_next_leaf() == 'def'
                    or val == 'def' and self._previous_leaf != 'async'
                ) and leaf.parent.parent.type != 'decorated'
            )
            if needs_lines and actual < wanted:
                func_or_cls = leaf.parent
                suite = func_or_cls.parent
                if suite.type == 'decorated':  # Decorator
                    suite = suite.parent

                # The first leaf of a file or a suite should not need blank
                # lines.
                if suite.children[int(suite.type == 'suite')] != func_or_cls:
                    code = 302 if wanted == 2 else 301
                    message = "expected %s blank line, found %s" \
                        % (wanted, actual)
                    self.add_issue(spacing, code, message)

            self._max_new_lines_in_prefix = 0

        self._newline_count = 0

    def visit_leaf(self, leaf):
        super(PEP8Normalizer, self).visit_leaf(leaf)
        for part in leaf._split_prefix():
            if part.type == 'spacing':
                # This part is used for the part call after for.
                break
            self._visit_part(part, part.create_spacing_part(), leaf)

        self._analyse_non_prefix(leaf)
        self._visit_part(leaf, part, leaf)

        # Cleanup
        self._last_indentation_tos = self._indentation_tos

        self._new_statement = leaf.type == 'newline'

        # TODO does this work? with brackets and stuff?
        if leaf.type == 'newline' and \
                self._indentation_tos.type == IndentationTypes.BACKSLASH:
            self._indentation_tos = self._indentation_tos.parent

        if leaf.value == ':' and leaf.parent.type in _SUITE_INTRODUCERS:
            self._in_suite_introducer = False
        elif leaf.value == 'elif':
            self._in_suite_introducer = True

        if not self._new_statement:
            self._reset_newlines(part, leaf)
            # NOTE(review): _max_blank_lines is never initialized or read
            # elsewhere in this module — looks vestigial; confirm.
            self._max_blank_lines = 0

        self._previous_leaf = leaf

        return leaf.value

    def _visit_part(self, part, spacing, leaf):
        value = part.value
        type_ = part.type
        if type_ == 'error_leaf':
            return

        if value == ',' and part.parent.type == 'dictorsetmaker':
            self._indentation_tos = self._indentation_tos.parent

        node = self._indentation_tos

        if type_ == 'comment':
            if value.startswith('##'):
                # Whole blocks of # should not raise an error.
                if value.lstrip('#'):
                    self.add_issue(part, 266, "Too many leading '#' for block comment.")
            elif self._on_newline:
                if not re.match(r'#:? ', value) and not value == '#' \
                        and not (value.startswith('#!') and part.start_pos == (1, 0)):
                    self.add_issue(part, 265, "Block comment should start with '# '")
            else:
                if not re.match(r'#:? [^ ]', value):
                    self.add_issue(part, 262, "Inline comment should start with '# '")

            self._reset_newlines(spacing, leaf, is_comment=True)
        elif type_ == 'newline':
            if self._newline_count > self._get_wanted_blank_lines_count():
                self.add_issue(part, 303, "Too many blank lines (%s)" % self._newline_count)
            elif leaf in ('def', 'class') \
                    and leaf.parent.parent.type == 'decorated':
                self.add_issue(part, 304, "Blank lines found after function decorator")

            self._newline_count += 1

        if type_ == 'backslash':
            # TODO is this enough checking? What about ==?
            if node.type != IndentationTypes.BACKSLASH:
                if node.type != IndentationTypes.SUITE:
                    self.add_issue(part, 502, 'The backslash is redundant between brackets')
                else:
                    indentation = node.indentation
                    if self._in_suite_introducer and node.type == IndentationTypes.SUITE:
                        indentation += self._config.indentation

                    self._indentation_tos = BackslashNode(
                        self._config,
                        indentation,
                        part,
                        spacing,
                        parent=self._indentation_tos
                    )
        elif self._on_newline:
            indentation = spacing.value
            if node.type == IndentationTypes.BACKSLASH \
                    and self._previous_part.type == 'newline':
                self._indentation_tos = self._indentation_tos.parent

            if not self._check_tabs_spaces(spacing):
                should_be_indentation = node.indentation
                if type_ == 'comment':
                    # Comments can be dedented. So we have to care for that.
                    n = self._last_indentation_tos
                    while True:
                        if len(indentation) > len(n.indentation):
                            break

                        should_be_indentation = n.indentation

                        self._last_indentation_tos = n
                        if n == node:
                            break
                        n = n.parent

                if self._new_statement:
                    if type_ == 'newline':
                        if indentation:
                            self.add_issue(spacing, 291, 'Trailing whitespace')
                    elif indentation != should_be_indentation:
                        s = '%s %s' % (len(self._config.indentation), self._indentation_type)
                        self.add_issue(part, 111, 'Indentation is not a multiple of ' + s)
                else:
                    if value in '])}':
                        should_be_indentation = node.bracket_indentation
                    else:
                        should_be_indentation = node.indentation
                    if self._in_suite_introducer and indentation == \
                            node.get_latest_suite_node().indentation \
                            + self._config.indentation:
                        self.add_issue(part, 129, "Line with same indent as next logical block")
                    elif indentation != should_be_indentation:
                        if not self._check_tabs_spaces(spacing) and part.value != '\n':
                            if value in '])}':
                                if node.type == IndentationTypes.VERTICAL_BRACKET:
                                    self.add_issue(part, 124, "Closing bracket does not match visual indentation")
                                else:
                                    # FIX: message said "Losing bracket".
                                    self.add_issue(part, 123, "Closing bracket does not match indentation of opening bracket's line")
                            else:
                                if len(indentation) < len(should_be_indentation):
                                    if node.type == IndentationTypes.VERTICAL_BRACKET:
                                        self.add_issue(part, 128, 'Continuation line under-indented for visual indent')
                                    elif node.type == IndentationTypes.BACKSLASH:
                                        self.add_issue(part, 122, 'Continuation line missing indentation or outdented')
                                    elif node.type == IndentationTypes.IMPLICIT:
                                        self.add_issue(part, 135, 'xxx')
                                    else:
                                        self.add_issue(part, 121, 'Continuation line under-indented for hanging indent')
                                else:
                                    if node.type == IndentationTypes.VERTICAL_BRACKET:
                                        self.add_issue(part, 127, 'Continuation line over-indented for visual indent')
                                    elif node.type == IndentationTypes.IMPLICIT:
                                        self.add_issue(part, 136, 'xxx')
                                    else:
                                        self.add_issue(part, 126, 'Continuation line over-indented for hanging indent')
        else:
            self._check_spacing(part, spacing)

        self._check_line_length(part, spacing)
        # -------------------------------
        # Finalizing. Updating the state.
        # -------------------------------
        if value and value in '()[]{}' and type_ != 'error_leaf' \
                and part.parent.type != 'error_node':
            if value in _OPENING_BRACKETS:
                self._indentation_tos = BracketNode(
                    self._config, part,
                    parent=self._indentation_tos,
                    in_suite_introducer=self._in_suite_introducer
                )
            else:
                assert node.type != IndentationTypes.IMPLICIT
                self._indentation_tos = self._indentation_tos.parent
        elif value in ('=', ':') and self._implicit_indentation_possible \
                and part.parent.type in _IMPLICIT_INDENTATION_TYPES:
            indentation = node.indentation
            self._indentation_tos = ImplicitNode(
                self._config, part, parent=self._indentation_tos
            )

        self._on_newline = type_ in ('newline', 'backslash', 'bom')

        self._previous_part = part
        self._previous_spacing = spacing

    def _check_line_length(self, part, spacing):
        if part.type == 'backslash':
            last_column = part.start_pos[1] + 1
        else:
            last_column = part.end_pos[1]
        if last_column > self._config.max_characters \
                and spacing.start_pos[1] <= self._config.max_characters:
            # Special case for long URLs in multi-line docstrings or comments,
            # but still report the error when the 72 first chars are
            # whitespaces.
            report = True
            if part.type == 'comment':
                splitted = part.value[1:].split()
                if len(splitted) == 1 \
                        and (part.end_pos[1] - len(splitted[0])) < 72:
                    report = False
            if report:
                self.add_issue(
                    part,
                    501,
                    'Line too long (%s > %s characters)'
                    % (last_column, self._config.max_characters),
                )

    def _check_spacing(self, part, spacing):
        def add_if_spaces(*args):
            if spaces:
                return self.add_issue(*args)

        def add_not_spaces(*args):
            if not spaces:
                return self.add_issue(*args)

        spaces = spacing.value
        prev = self._previous_part
        if prev is not None and prev.type == 'error_leaf' or part.type == 'error_leaf':
            return

        type_ = part.type
        if '\t' in spaces:
            self.add_issue(spacing, 223, 'Used tab to separate tokens')
        elif type_ == 'comment':
            if len(spaces) < self._config.spaces_before_comment:
                self.add_issue(spacing, 261, 'At least two spaces before inline comment')
        elif type_ == 'newline':
            add_if_spaces(spacing, 291, 'Trailing whitespace')
        elif len(spaces) > 1:
            self.add_issue(spacing, 221, 'Multiple spaces used')
        else:
            if prev in _OPENING_BRACKETS:
                message = "Whitespace after '%s'" % part.value
                add_if_spaces(spacing, 201, message)
            elif part in _CLOSING_BRACKETS:
                message = "Whitespace before '%s'" % part.value
                add_if_spaces(spacing, 202, message)
            elif part in (',', ';') or part == ':' \
                    and part.parent.type not in _POSSIBLE_SLICE_PARENTS:
                message = "Whitespace before '%s'" % part.value
                add_if_spaces(spacing, 203, message)
            elif prev == ':' and prev.parent.type in _POSSIBLE_SLICE_PARENTS:
                pass  # TODO
            elif prev in (',', ';', ':'):
                # FIX: the '%s' placeholder was never filled in.
                add_not_spaces(spacing, 231, "missing whitespace after '%s'" % prev.value)
            elif part == ':':  # Is a subscript
                # TODO
                pass
            elif part in ('*', '**') and part.parent.type not in _NON_STAR_TYPES \
                    or prev in ('*', '**') \
                    and prev.parent.type not in _NON_STAR_TYPES:
                # TODO
                pass
            elif prev in _FACTOR and prev.parent.type == 'factor':
                pass
            elif prev == '@' and prev.parent.type == 'decorator':
                pass  # TODO should probably raise an error if there's a space here
            elif part in _NEEDS_SPACE or prev in _NEEDS_SPACE:
                if part == '=' and part.parent.type in ('argument', 'param') \
                        or prev == '=' and prev.parent.type in ('argument', 'param'):
                    if part == '=':
                        param = part.parent
                    else:
                        param = prev.parent
                    if param.type == 'param' and param.annotation:
                        add_not_spaces(spacing, 252, 'Expected spaces around annotation equals')
                    else:
                        add_if_spaces(spacing, 251, 'Unexpected spaces around keyword / parameter equals')
                elif part in _BITWISE_OPERATOR or prev in _BITWISE_OPERATOR:
                    add_not_spaces(spacing, 227, 'Missing whitespace around bitwise or shift operator')
                elif part == '%' or prev == '%':
                    add_not_spaces(spacing, 228, 'Missing whitespace around modulo operator')
                else:
                    message_225 = 'Missing whitespace between tokens'
                    add_not_spaces(spacing, 225, message_225)
            elif type_ == 'keyword' or prev.type == 'keyword':
                add_not_spaces(spacing, 275, 'Missing whitespace around keyword')
            else:
                prev_spacing = self._previous_spacing
                if prev in _ALLOW_SPACE and spaces != prev_spacing.value \
                        and '\n' not in self._previous_leaf.prefix:
                    message = "Whitespace before operator doesn't match with whitespace after"
                    self.add_issue(spacing, 229, message)

                if spaces and part not in _ALLOW_SPACE and prev not in _ALLOW_SPACE:
                    message_225 = 'Missing whitespace between tokens'
                    #print('xy', spacing)
                    #self.add_issue(spacing, 225, message_225)
                    # TODO why only brackets?
                    if part in _OPENING_BRACKETS:
                        message = "Whitespace before '%s'" % part.value
                        add_if_spaces(spacing, 211, message)

    def _analyse_non_prefix(self, leaf):
        typ = leaf.type
        if typ == 'name' and leaf.value in ('l', 'O', 'I'):
            if leaf.is_definition():
                message = "Do not define %s named 'l', 'O', or 'I' one line"
                # NOTE(review): parso node types are 'classdef'/'funcdef';
                # these 'class'/'function' comparisons look dead — confirm.
                if leaf.parent.type == 'class' and leaf.parent.name == leaf:
                    self.add_issue(leaf, 742, message % 'classes')
                elif leaf.parent.type == 'function' and leaf.parent.name == leaf:
                    self.add_issue(leaf, 743, message % 'function')
                else:
                    # FIX: was `self.add_issuadd_issue(741, message %
                    # 'variables', leaf)` — a nonexistent method with the
                    # arguments in the wrong order (AttributeError at
                    # runtime).
                    self.add_issue(leaf, 741, message % 'variables')
        elif leaf.value == ':':
            if isinstance(leaf.parent, (Flow, Scope)) and leaf.parent.type != 'lambdef':
                next_leaf = leaf.get_next_leaf()
                if next_leaf.type != 'newline':
                    if leaf.parent.type == 'funcdef':
                        self.add_issue(next_leaf, 704, 'Multiple statements on one line (def)')
                    else:
                        self.add_issue(next_leaf, 701, 'Multiple statements on one line (colon)')
        elif leaf.value == ';':
            if leaf.get_next_leaf().type in ('newline', 'endmarker'):
                self.add_issue(leaf, 703, 'Statement ends with a semicolon')
            else:
                self.add_issue(leaf, 702, 'Multiple statements on one line (semicolon)')
        elif leaf.value in ('==', '!='):
            comparison = leaf.parent
            index = comparison.children.index(leaf)
            left = comparison.children[index - 1]
            right = comparison.children[index + 1]
            for node in left, right:
                if node.type == 'keyword' or node.type == 'name':
                    if node.value == 'None':
                        message = "comparison to None should be 'if cond is None:'"
                        self.add_issue(leaf, 711, message)
                        break
                    elif node.value in ('True', 'False'):
                        message = "comparison to False/True should be 'if cond is True:' or 'if cond:'"
                        self.add_issue(leaf, 712, message)
                        break
        elif leaf.value in ('in', 'is'):
            comparison = leaf.parent
            if comparison.type == 'comparison' and comparison.parent.type == 'not_test':
                if leaf.value == 'in':
                    self.add_issue(leaf, 713, "test for membership should be 'not in'")
                else:
                    self.add_issue(leaf, 714, "test for object identity should be 'is not'")
        elif typ == 'string':
            # Checking multiline strings
            for i, line in enumerate(leaf.value.splitlines()[1:]):
                indentation = re.match(r'[ \t]*', line).group(0)
                start_pos = leaf.line + i, len(indentation)
                # TODO check multiline indentation.
        elif typ == 'endmarker':
            if self._newline_count >= 2:
                self.add_issue(leaf, 391, 'Blank line at end of file')

    def add_issue(self, node, code, message):
        """Add an issue unless the surrounding code failed to parse."""
        if self._previous_leaf is not None:
            if search_ancestor(self._previous_leaf, 'error_node') is not None:
                return
            if self._previous_leaf.type == 'error_leaf':
                return
        if search_ancestor(node, 'error_node') is not None:
            return
        if code in (901, 903):
            # 901 and 903 are raised by the ErrorFinder.
            super(PEP8Normalizer, self).add_issue(node, code, message)
        else:
            # Skip ErrorFinder here, because it has custom behavior.
            super(ErrorFinder, self).add_issue(node, code, message)


class PEP8NormalizerConfig(ErrorFinderConfig):
    normalizer_class = PEP8Normalizer
    """
    Normalizing to PEP8. Not really implemented, yet.
    """
    def __init__(self, indentation=' ' * 4, hanging_indentation=None,
                 max_characters=79, spaces_before_comment=2):
        self.indentation = indentation
        if hanging_indentation is None:
            hanging_indentation = indentation
        self.hanging_indentation = hanging_indentation
        self.closing_bracket_hanging_indentation = ''
        self.break_after_binary = False
        self.max_characters = max_characters
        self.spaces_before_comment = spaces_before_comment


# TODO this is not yet ready.
#@PEP8Normalizer.register_rule(type='endmarker')
class BlankLineAtEnd(Rule):
    code = 392
    message = 'Blank line at end of file'

    def is_issue(self, leaf):
        return self._newline_count >= 2
from __future__ import print_function, division

from sympy.core import symbols, Add, Dummy
from sympy.core.compatibility import combinations_with_replacement
from sympy.core.numbers import Rational
from sympy.polys import cancel, ComputationFailed, parallel_poly_from_expr, reduced, Poly
from sympy.polys.monomials import Monomial, monomial_div
# BUG FIX: DomainError is raised in ratsimpmodprime() but was never imported,
# so the "no associated field" error path died with a NameError instead of
# the intended DomainError.
from sympy.polys.polyerrors import DomainError, PolificationFailed
from sympy.utilities.misc import debug


def ratsimp(expr):
    """
    Put an expression over a common denominator, cancel and reduce.

    Examples
    ========

    >>> from sympy import ratsimp
    >>> from sympy.abc import x, y
    >>> ratsimp(1/x + 1/y)
    (x + y)/(x*y)
    """

    f, g = cancel(expr).as_numer_denom()
    try:
        Q, r = reduced(f, [g], field=True, expand=False)
    except ComputationFailed:
        return f/g

    return Add(*Q) + cancel(r/g)


def ratsimpmodprime(expr, G, *gens, **args):
    """
    Simplifies a rational expression ``expr`` modulo the prime ideal
    generated by ``G``.  ``G`` should be a Groebner basis of the
    ideal.

    >>> from sympy.simplify.ratsimp import ratsimpmodprime
    >>> from sympy.abc import x, y
    >>> eq = (x + y**5 + y)/(x - y)
    >>> ratsimpmodprime(eq, [x*y**5 - x - y], x, y, order='lex')
    (x**2 + x*y + x + y)/(x**2 - x*y)

    If ``polynomial`` is False, the algorithm computes a rational
    simplification which minimizes the sum of the total degrees of
    the numerator and the denominator.

    If ``polynomial`` is True, this function just brings numerator and
    denominator into a canonical form. This is much faster, but has
    potentially worse results.

    References
    ==========

    M. Monagan, R. Pearce, Rational Simplification Modulo a Polynomial
    Ideal,
    http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.163.6984
    (specifically, the second algorithm)
    """
    from sympy import solve

    quick = args.pop('quick', True)
    polynomial = args.pop('polynomial', False)
    debug('ratsimpmodprime', expr)

    # usual preparation of polynomials:

    num, denom = cancel(expr).as_numer_denom()

    try:
        polys, opt = parallel_poly_from_expr([num, denom] + G, *gens, **args)
    except PolificationFailed:
        return expr

    domain = opt.domain

    if domain.has_assoc_Field:
        opt.domain = domain.get_field()
    else:
        raise DomainError(
            "can't compute rational simplification over %s" % domain)

    # compute only once
    leading_monomials = [g.LM(opt.order) for g in polys[2:]]
    tested = set()

    def staircase(n):
        """
        Compute all monomials with degree less than ``n`` that are
        not divisible by any element of ``leading_monomials``.
        """
        if n == 0:
            return [1]
        S = []
        for mi in combinations_with_replacement(range(len(opt.gens)), n):
            m = [0]*len(opt.gens)
            for i in mi:
                m[i] += 1
            if all([monomial_div(m, lmg) is None for lmg in
                    leading_monomials]):
                S.append(m)

        return [Monomial(s).as_expr(*opt.gens) for s in S] + staircase(n - 1)

    def _ratsimpmodprime(a, b, allsol, N=0, D=0):
        """
        Computes a rational simplification of ``a/b`` which minimizes
        the sum of the total degrees of the numerator and the denominator.

        The algorithm proceeds by looking at ``a * d - b * c`` modulo
        the ideal generated by ``G`` for some ``c`` and ``d`` with degree
        less than ``a`` and ``b`` respectively.
        The coefficients of ``c`` and ``d`` are indeterminates and thus
        the coefficients of the normalform of ``a * d - b * c`` are
        linear polynomials in these indeterminates.
        If these linear polynomials, considered as system of
        equations, have a nontrivial solution, then `\\frac{a}{b}
        \\equiv \\frac{c}{d}` modulo the ideal generated by ``G``. So,
        by construction, the degree of ``c`` and ``d`` is less than
        the degree of ``a`` and ``b``, so a simpler representation
        has been found.
        After a simpler representation has been found, the algorithm
        tries to reduce the degree of the numerator and denominator
        and returns the result afterwards.

        As an extension, if quick=False, we look at all possible degrees such
        that the total degree is less than *or equal to* the best current
        solution. We retain a list of all solutions of minimal degree, and try
        to find the best one at the end.
        """
        c, d = a, b
        steps = 0

        maxdeg = a.total_degree() + b.total_degree()
        if quick:
            bound = maxdeg - 1
        else:
            bound = maxdeg

        while N + D <= bound:
            if (N, D) in tested:
                break
            tested.add((N, D))

            M1 = staircase(N)
            M2 = staircase(D)
            debug('%s / %s: %s, %s' % (N, D, M1, M2))

            Cs = symbols("c:%d" % len(M1), cls=Dummy)
            Ds = symbols("d:%d" % len(M2), cls=Dummy)
            ng = Cs + Ds

            c_hat = Poly(
                sum([Cs[i] * M1[i] for i in range(len(M1))]), opt.gens + ng)
            d_hat = Poly(
                sum([Ds[i] * M2[i] for i in range(len(M2))]), opt.gens + ng)

            r = reduced(a * d_hat - b * c_hat, G, opt.gens + ng,
                        order=opt.order, polys=True)[1]

            S = Poly(r, gens=opt.gens).coeffs()
            sol = solve(S, Cs + Ds, particular=True, quick=True)

            if sol and not all([s == 0 for s in sol.values()]):
                c = c_hat.subs(sol)
                d = d_hat.subs(sol)

                # The "free" variables occurring before as parameters
                # might still be in the substituted c, d, so set them
                # to the value chosen before:
                c = c.subs(dict(list(zip(Cs + Ds, [1] * (len(Cs) + len(Ds))))))
                d = d.subs(dict(list(zip(Cs + Ds, [1] * (len(Cs) + len(Ds))))))

                c = Poly(c, opt.gens)
                d = Poly(d, opt.gens)
                if d == 0:
                    raise ValueError('Ideal not prime?')

                allsol.append((c_hat, d_hat, S, Cs + Ds))
                if N + D != maxdeg:
                    allsol = [allsol[-1]]

                break

            steps += 1
            N += 1
            D += 1

        if steps > 0:
            c, d, allsol = _ratsimpmodprime(c, d, allsol, N, D - steps)
            c, d, allsol = _ratsimpmodprime(c, d, allsol, N - steps, D)

        return c, d, allsol

    # preprocessing. this improves performance a bit when deg(num)
    # and deg(denom) are large:
    num = reduced(num, G, opt.gens, order=opt.order)[1]
    denom = reduced(denom, G, opt.gens, order=opt.order)[1]

    if polynomial:
        return (num/denom).cancel()

    c, d, allsol = _ratsimpmodprime(
        Poly(num, opt.gens), Poly(denom, opt.gens), [])
    if not quick and allsol:
        debug('Looking for best minimal solution. Got: %s' % len(allsol))
        newsol = []
        for c_hat, d_hat, S, ng in allsol:
            sol = solve(S, ng, particular=True, quick=False)
            newsol.append((c_hat.subs(sol), d_hat.subs(sol)))
        c, d = min(newsol, key=lambda x: len(x[0].terms()) + len(x[1].terms()))

    if not domain.has_Field:
        # Clear denominators introduced while working over the field and
        # carry their ratio as a rational content factor.
        cn, c = c.clear_denoms(convert=True)
        dn, d = d.clear_denoms(convert=True)
        r = Rational(cn, dn)
    else:
        # BUG FIX: over a field domain ``r`` was previously left undefined
        # (or the function fell through returning None), crashing the final
        # return.  A neutral factor keeps the return expression uniform.
        r = Rational(1)

    return (c*r.q)/(d*r.p)
#!/usr/bin/python import numpy as np import os import sys import math import matplotlib matplotlib.use('Pdf') import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib.backends.backend_pdf import PdfPages import matplotlib.font_manager as fm ## 6-1-15 ## Simple code to explore NL as a function of beta, by using interleaving method import logging logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout) logger = logging.getLogger("tests_hsm_interleaving") from galsim.cdmodel import * from sim2 import * ## where all the BF stuff is from scipy import optimize from measurement_function import * #### This code is just to plot /Delta e / beta/ e vs magnitude for NL paper. ### Parameters k=1000 base_size=1*k + 1 ## ?? n=8 m_zero=20 # 24 #m_gal=20 gal_sigma=0.1 print "gal_sigma", gal_sigma pixel_scale=0.11 noise=20 type='nl' # 'nl' or 'bf' x_var='mag_and_ellipticity' #'magnitude' or 'beta' or 'mag_and_beta' profile_type='optical' # 'gaussian' or 'optical' label_type='lambda' # 'lambda' or 'ellipticity' #lam = 1380. # NB: don't use lambda - that's a reserved word. 
tel_diam = 2.4 obscuration_optical=0.3 beta0=3.566e-7 from collections import OrderedDict e_vec=[ (-0.05, 0.), (-0.025, 0.), (0.,0.), (0.025, 0.), (0.05, 0.), (0., -0.05), (0., -0.025), (0., 0.025), (0., 0.05)] mag_gal_vec=[18, 19, 20, 21, 22] wavelength_dict=OrderedDict([('Z087',0.869), ('Y106',1.060), ('J129',1.293), ('W149',1.485), ('H158',1.577), ('F184',1.842)]) # in microns alpha=0.6 """ lam Z087 mag_gal_vec, slope_dict2[lam][0] , mag_gal_vec, slope_dict2[lam][1] [18, 19, 20, 21, 22] [75742.870427579968, 28184.622313400596, 10930.675099636968, 4306.7322714014599, 1707.8251766452668] [18, 19, 20, 21, 22] [46734.377184175639, 17024.21550656106, 6550.3354381991485, 2573.0948113737395, 1018.6509485882863] lam Y106 mag_gal_vec, slope_dict2[lam][0] , mag_gal_vec, slope_dict2[lam][1] [18, 19, 20, 21, 22] [34010.730391860387, 12391.134553428625, 4769.7505454131397, 1873.769383607281, 742.35982843838917] [18, 19, 20, 21, 22] [16792.337753104806, 5842.1631550362963, 2210.2871739367233, 862.43111930308351, 340.69945229407904] lam J129 mag_gal_vec, slope_dict2[lam][0] , mag_gal_vec, slope_dict2[lam][1] [18, 19, 20, 21, 22] [2081.6495970849842, 610.11067968176383, 214.10345620322943, 80.857394587802432, 31.572887410428091] [18, 19, 20, 21, 22] [-4207.5081001661547, -1755.5098475711957, -707.1455227381914, -282.54122661853677, -112.62130721933471] lam W149 mag_gal_vec, slope_dict2[lam][0] , mag_gal_vec, slope_dict2[lam][1] [18, 19, 20, 21, 22] [-1592.3563997057356, -629.75642983168837, -249.94456341030767, -99.291328474215405, -39.518342722253216] [18, 19, 20, 21, 22] [-2290.9654828275197, -903.30454718955991, -358.36928529396317, -142.08097811688063, -56.513617906103555] lam H158 mag_gal_vec, slope_dict2[lam][0] , mag_gal_vec, slope_dict2[lam][1] [18, 19, 20, 21, 22] [-6150.5703896285277, -2378.3714611299974, -935.60571825874524, -370.61280809129659, -147.20284187141385] [18, 19, 20, 21, 22] [-7708.1960180413962, -2965.5113576244357, -1164.6449589701208, 
-461.15279359937051, -182.91858877565352] lam F184 mag_gal_vec, slope_dict2[lam][0] , mag_gal_vec, slope_dict2[lam][1] [18, 19, 20, 21, 22] [-3699.0422391040042, -1440.0794503965601, -568.07319186230689, -225.35603566550293, -89.752006470152082] [18, 19, 20, 21, 22] [-4015.0815292432935, -1561.8699682539068, -616.18170001934766, -244.46452736574233, -97.058721196402871] """ pp=PdfPages("test_bias_NL_vs_flux.pdf") print "Output PDF: test_bias_NL_vs_flux.pdf" #### PLOTS #### Do the plotting here plt.minorticks_on() #plt.tight_layout() ### We do not have matplotlib 1.1, with the 'style' package. Modify the matplotlibrc file parameters instead import matplotlib as mpl mpl.rc('lines', linewidth=1, color='black', linestyle='-') mpl.rc('font', family='serif',weight='normal', size=10.0 ) mpl.rc('text', color='black', usetex=False) mpl.rc('axes', edgecolor='black', linewidth=1, grid=False, titlesize=11, labelsize=11, labelweight='normal',labelcolor='black') mpl.rc('axes.formatter', limits=[-4,4]) mpl.rcParams['xtick.major.size']=7 mpl.rcParams['xtick.minor.size']=4 mpl.rcParams['xtick.major.pad']=8 mpl.rcParams['xtick.minor.pad']=8 mpl.rcParams['xtick.labelsize']= '11' mpl.rcParams['xtick.minor.width']= 1.0 mpl.rcParams['xtick.major.width']= 1.0 mpl.rcParams['ytick.major.size']=7 mpl.rcParams['ytick.minor.size']=4 mpl.rcParams['ytick.major.pad']=8 mpl.rcParams['ytick.minor.pad']=8 mpl.rcParams['ytick.labelsize']= '11' mpl.rcParams['ytick.minor.width']= 1.0 mpl.rcParams['ytick.major.width']= 1.0 mpl.rc ('legend', numpoints=1, fontsize='11', shadow=False, frameon=False) ## Plot parameters plt.subplots_adjust(hspace=0.01, wspace=0.01) prop = fm.FontProperties(size=9) marker_size=7 loc_label = "upper right" visible_x, visible_y = True, True grid=False ymin, ymax = -0.0001, 0.0001 m_req=1e-3 c_req=1e-4 color_vec=['r', 'y', 'g', 'c', 'b', 'm', 'k'] #color_dict={0.0:'r', 0.025:'k', 0.05:'b', 0.075:'m', 0.08:'c', 0.1:'g'} color_vec_lam=['m','b', 'c', 'g', 'y', 'r'] color_dict_e={} 
for i,e in enumerate(e_vec): color_dict_e[e]=color_vec[i%len(color_vec)] color_dict_mag={} for i,m_gal in enumerate(mag_gal_vec): color_dict_mag[m_gal]=color_vec[i%len(color_vec)] color_dict_lam={} for i,lam in enumerate(wavelength_dict): color_dict_lam[lam]=color_vec_lam[i%len(color_vec_lam)] x_vec=mag_gal_vec x_label=r"mag_object" string= r"Non-linearity: $f=x-\beta x^{2}$ " + "\n" + "OpticalPSF (tel_diam=%g m, obscuration=%g) * Pixel (%g/%g arcsec/pix), no noise. "%(tel_diam, obscuration_optical, pixel_scale, n) def plot_function_e_and_r (fig, x_vec, y1_vec, y2_vec, y3_vec, x1label='', x2label='', y1label=r"$\Delta$e", y2label=r"$\Delta$R/R", lam_key='H158', e_key=(0.0, 0.0)): if len(x2label) == 0: x2label=x1label if label_type == 'lambda': color_fmt=color_dict_lam[lam_key] label_e1='' label_e2='' label='%s'%(lam_key) elif label_type == 'ellipticity': color_fmt=color_dict_lam[lam_key] label_e1='%g' %e_key[0] label_e2='%g' %e_key[1] label='(e1,e2)=(%g,%g)'%(e_key[0], e_key[1]) else: print "wrong label type." 
sys.exit(1) ax = fig.add_subplot (211) ax.errorbar( x_vec, y1_vec, yerr=None, ecolor = color_fmt, label=label_e1, fmt=color_fmt+'s-', markersize=marker_size, alpha=alpha) #ax.errorbar( x_vec, y2_vec, yerr=None, ecolor = color_fmt, label=label_e2, fmt=color_fmt+'x-', markersize=marker_size, alpha=alpha) plt.axhline(y=0.,color='k',ls='solid') if e_key[0] == 0.0 and e_key[1] == 0.0 and label_type == 'ellipticity': plt.axhline(y=1e-5, color='r',ls='-', label='1e-5') # requirement #plt.axhspan(-m_req, m_req, facecolor='0.5', alpha=0.3) ax.set_xticklabels([int(x) for x in ax.get_xticks()], visible=visible_x) lx=ax.set_xlabel(x1label, visible=visible_x) #lx.set_fontsize(font_size) ax.set_xscale('linear') ax.set_yticklabels(ax.get_yticks(), visible= visible_y) ly=ax.set_ylabel(y1label, visible=visible_y) #ly.set_fontsize(font_size) ax.set_yscale('linear') #plt.ylim ([-1e-4, 8e4]) #plt.ylim ([ymin, ymax]) xmin, xmax=plt.xlim() delta=(xmax-xmin) plt.xlim ([xmin - 0.03*delta, xmax + 0.03*delta]) if label_type == 'ellipticity': plt.title(lam_key+" (%g $\mu$m)"%wavelength_dict[lam], fontsize=11) #if plot_pos== 321: ax.legend(loc=loc_label , fancybox=True, ncol=1, numpoints=1, prop = prop) #plt.grid(grid, which='both', ls='-', alpha=0.5) plt.grid(grid) ax = fig.add_subplot (212) if e_key[0] == 0.0 and e_key[1] == 0.0: ax.errorbar( x_vec, y3_vec, yerr=None, ecolor = color_fmt, label=label, fmt=color_fmt+'o-', markersize=marker_size, alpha=alpha) #ax.errorbar( x_vec, theory_delta_r_gauss, yerr=None, ecolor = 'k', label='theory Gauss', fmt='r-', markersize=marker_size, alpha=1.) 
plt.axhline(y=0.,color='k',ls='solid') if e_key[0] == 0.0 and e_key[1] == 0.0 and lam_key == 'H158' and label_type == 'ellipticity': plt.axhline(y=1e-4, color='r',ls='-', label='1e-4') # requirement if x_var == 'magnitude' and profile_type == 'gaussian': ax.errorbar(x_vec, ratio_vec, yerr=None, ecolor = 'b', label='Theory', fmt='bo-', markersize=marker_size, alpha=alpha) #plt.axhspan(-m_req, m_req, facecolor='0.5', alpha=0.3) ax.set_xticklabels([int(x) for x in ax.get_xticks()], visible=visible_x) lx=ax.set_xlabel(x2label, visible=visible_x) #lx.set_fontsize(font_size) ax.set_xscale('linear') ax.set_yticklabels(ax.get_yticks(), visible= visible_y) ly=ax.set_ylabel(y2label, visible=visible_y) #ly.set_fontsize(font_size) ax.set_yscale('linear') #plt.ylim ([-1e-4, 8e4]) #plt.ylim ([ymin, ymax]) xmin, xmax=plt.xlim() delta=(xmax-xmin) plt.xlim ([xmin - 0.03*delta, xmax + 0.03*delta]) #if profile_type=='optical': # plt.ylim ([0., 0.040]) # plt.xlim ([17.5, 24.5]) #plt.ylim([0., 0.18e-4]) if label_type == 'ellipticity': plt.title(lam_key+" (%g $\mu$m)"%wavelength_dict[lam], fontsize=11) #if plot_pos== 324: ax.legend(loc=loc_label , fancybox=True, ncol=1, numpoints=1, prop = prop) """ #Inset with zoom subpos = [0.45, 0.45, 0.475, 0.35] subax1 = add_subplot_axes(ax,subpos) if e_key[0] == 0.0 and e_key[1] == 0.0 and not x_var == 'mag_and_beta': # does not depend on e, just plot once subax1.plot (x_vec, y3_vec, color_fmt+'o-', markersize=marker_size, alpha=alpha) if profile_type == 'gaussian': subax1.plot (x_vec, ratio_vec,'bo-', markersize=marker_size, alpha=alpha) subax1.axhline(y=1e-4, color='r',ls='-') subax1.axhline(y=0.,color='k',ls='solid') subax1.set_yticklabels(subax1.get_yticks(), size=9, visible=True) subax1.set_xticklabels(subax1.get_xticks(), size=9, visible=True) plt.ylim([-1e-4, 3e-4]) if profile_type == 'optical': plt.xlim ([21, 24.5]) #else: # plt.xlim ([21.8, 24.2]) # subax1.set_yticklabels(subax1.get_yticks(), size=5, visible=True) # 
subax1.set_xticklabels(subax1.get_xticks(), size=5, visible=True) """ a=[75742.870427579968, 28184.622313400596, 10930.675099636968, 4306.7322714014599, 1707.8251766452668] b=[46734.377184175639, 17024.21550656106, 6550.3354381991485, 2573.0948113737395, 1018.6509485882863] c=[34010.730391860387, 12391.134553428625, 4769.7505454131397, 1873.769383607281, 742.35982843838917] d=[16792.337753104806, 5842.1631550362963, 2210.2871739367233, 862.43111930308351, 340.69945229407904] e=[2081.6495970849842, 610.11067968176383, 214.10345620322943, 80.857394587802432, 31.572887410428091] f=[-4207.5081001661547, -1755.5098475711957, -707.1455227381914, -282.54122661853677, -112.62130721933471] g=[-1592.3563997057356, -629.75642983168837, -249.94456341030767, -99.291328474215405, -39.518342722253216] h=[-2290.9654828275197, -903.30454718955991, -358.36928529396317, -142.08097811688063, -56.513617906103555] i=[-6150.5703896285277, -2378.3714611299974, -935.60571825874524, -370.61280809129659, -147.20284187141385] j=[-7708.1960180413962, -2965.5113576244357, -1164.6449589701208, -461.15279359937051, -182.91858877565352] k=[-3699.0422391040042, -1440.0794503965601, -568.07319186230689, -225.35603566550293, -89.752006470152082] l=[-4015.0815292432935, -1561.8699682539068, -616.18170001934766, -244.46452736574233, -97.058721196402871] slope_dict2={'Z087':[a,b], 'Y106':[c,d], 'J129':[e,f], 'W149':[g,h], 'H158':[i,j], 'F184':[k,l]} fig = plt.figure() for lam in slope_dict2: plot_function_e_and_r (fig, mag_gal_vec, slope_dict2[lam][0] , mag_gal_vec, slope_dict2[lam][1], x1label=x_label, y1label=r"$\Delta e_1$/$\beta$/$e_1$", y2label=r"$\Delta e_2$/$\beta$/$e_2$", lam_key=lam) plt.suptitle(string, fontsize=13) fig.tight_layout() plt.subplots_adjust(top=0.85) pp.savefig(fig) plt.close() pp.close()
#!/usr/bin/env python3

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Create e2e test definitions.

Usage example:

  In $GOPATH/src/k8s.io/test-infra,

  $ make -C releng generate-tests \
      --yaml-config-path=releng/test_config.yaml \
"""

import argparse
import hashlib
import os
import ruamel.yaml

yaml = ruamel.yaml.YAML(typ='rt')
yaml.width = float("inf")

# Skeleton prow job; fields are filled in per job.  Structure must stay in
# sync with the dict accesses below (spec.containers[0].args/env/...).
PROW_CONFIG_TEMPLATE = """
tags:
- generated # AUTO-GENERATED by releng/generate_tests.py - DO NOT EDIT!
interval:
cron:
labels:
  preset-service-account: "true"
  preset-k8s-ssh: "true"
name:
spec:
  containers:
  - args:
    env:
    image: gcr.io/k8s-staging-test-infra/kubekins-e2e:v20220307-7fa60e9872-master
    resources:
      requests:
        cpu: 1000m
        memory: 3Gi
      limits:
        cpu: 1000m
        memory: 3Gi
"""

E2E_TESTGRID_CONFIG_TEMPLATE = """
name:
gcs_prefix:
column_header:
- configuration_value: node_os_image
- configuration_value: master_os_image
- configuration_value: Commit
- configuration_value: infra-commit
"""

GCS_LOG_PREFIX = "kubernetes-jenkins/logs/"

COMMENT = 'AUTO-GENERATED by releng/generate_tests.py - DO NOT EDIT.'


def get_sha1_hash(data):
    """Returns the SHA1 hash of the specified data."""
    sha1_hash = hashlib.sha1()
    sha1_hash.update(data.encode('utf-8'))
    return sha1_hash.hexdigest()


def substitute(job_name, lines):
    """Replace '${job_name_hash}' in lines with the SHA1 hash of job_name."""
    return [line.replace('${job_name_hash}', get_sha1_hash(job_name)[:10]) \
            for line in lines]


def get_args(job_name, field):
    """Returns a list of args for the given field."""
    if not field:
        return []
    return substitute(job_name, field.get('args', []))


def write_prow_configs_file(output_file, job_defs):
    """Writes the Prow configurations into output_file."""
    print(f'writing prow configuration to: {output_file}')
    with open(output_file, 'w') as fp:
        yaml.dump(job_defs, fp)


def write_testgrid_config_file(output_file, testgrid_config):
    """Writes the TestGrid test group configurations into output_file."""
    print(f'writing testgrid configuration to: {output_file}')
    with open(output_file, 'w') as fp:
        fp.write('# ' + COMMENT + '\n\n')
        yaml.dump(testgrid_config, fp)


def apply_job_overrides(envs_or_args, job_envs_or_args):
    '''Applies the envs or args overrides defined in the job level'''
    original_envs_or_args = envs_or_args[:]
    for job_env_or_arg in job_envs_or_args:
        name = job_env_or_arg.split('=', 1)[0]
        # An override replaces any existing '<name>=...' (or bare '<name>')
        # entry; otherwise it is appended.
        env_or_arg = next(
            (x for x in original_envs_or_args if (x.strip().startswith('%s=' % name) or
                                                  x.strip() == name)), None)
        if env_or_arg:
            envs_or_args.remove(env_or_arg)
        envs_or_args.append(job_env_or_arg)


class E2ENodeTest:
    """Generates job + prow configs for one e2enode job name."""

    def __init__(self, job_name, job, config):
        self.job_name = job_name
        self.job = job
        self.common = config['nodeCommon']
        self.images = config['nodeImages']
        self.k8s_versions = config['nodeK8sVersions']
        self.test_suites = config['nodeTestSuites']

    def __get_job_def(self, args):
        """Returns the job definition from the given args."""
        return {
            'scenario': 'kubernetes_e2e',
            'args': args,
            'sigOwners': self.job.get('sigOwners') or ['UNNOWN'],
            # Indicates that this job definition is auto-generated.
            'tags': ['generated'],
            '_comment': COMMENT,
        }

    def __get_prow_config(self, test_suite, k8s_version):
        """Returns the Prow config for the job from the given fields."""
        prow_config = yaml.load(PROW_CONFIG_TEMPLATE)
        prow_config['name'] = self.job_name
        # use cluster from test_suite, or job, or not at all
        if 'cluster' in test_suite:
            prow_config['cluster'] = test_suite['cluster']
        elif 'cluster' in self.job:
            prow_config['cluster'] = self.job['cluster']
        # use resources from test_suite, or job, or default
        if 'resources' in test_suite:
            prow_config['resources'] = test_suite['resources']
        elif 'resources' in self.job:
            prow_config['resources'] = self.job['resources']
        # pull interval or cron from job
        if 'interval' in self.job:
            del prow_config['cron']
            prow_config['interval'] = self.job['interval']
        elif 'cron' in self.job:
            # BUG FIX: was `del prow_config['cron']`, which deleted the very
            # key set on the next line and left the unused 'interval' stub in
            # the emitted config.  Mirror E2ETest.__get_prow_config: drop the
            # unused 'interval' key instead.
            del prow_config['interval']
            prow_config['cron'] = self.job['cron']
        else:
            raise Exception("no interval or cron definition found")

        # Assumes that the value in --timeout is of minutes.
        timeout = int(next(
            x[10:-1] for x in test_suite['args'] if (
                x.startswith('--timeout='))))

        container = prow_config['spec']['containers'][0]
        if not container['args']:
            container['args'] = []
        if not container['env']:
            container['env'] = []
        # Prow timeout = job timeout + 20min
        container['args'].append('--timeout=%d' % (timeout + 20))
        container['args'].extend(k8s_version.get('args', []))
        container['args'].append('--root=/go/src')

        container['env'].extend([{'name':'GOPATH', 'value': '/go'}])

        # Specify the appropriate kubekins-e2e image. This allows us to use a
        # specific image (containing a particular Go version) to build and
        # trigger the node e2e test to avoid issues like
        # https://github.com/kubernetes/kubernetes/issues/43534.
        if k8s_version.get('prowImage', None):
            container['image'] = k8s_version['prowImage']

        return prow_config

    def generate(self):
        '''Returns the job and the Prow configurations for this test.'''
        print(f'generating e2enode job: {self.job_name}')
        fields = self.job_name.split('-')
        if len(fields) != 6:
            raise ValueError('Expected 6 fields in job name', self.job_name)

        image = self.images[fields[3]]
        k8s_version = self.k8s_versions[fields[4][3:]]
        test_suite = self.test_suites[fields[5]]

        # envs are disallowed in node e2e tests.
        if 'envs' in self.common or 'envs' in image or 'envs' in test_suite:
            raise ValueError(
                'envs are disallowed in node e2e test', self.job_name)
        # Generates args.
        args = []
        args.extend(get_args(self.job_name, self.common))
        args.extend(get_args(self.job_name, image))
        args.extend(get_args(self.job_name, test_suite))
        # Generates job config.
        job_config = self.__get_job_def(args)
        # Generates prow config.
        prow_config = self.__get_prow_config(test_suite, k8s_version)

        # Combine --node-args
        node_args = []
        job_args = []
        for arg in job_config['args']:
            if '--node-args=' in arg:
                node_args.append(arg.split('=', 1)[1])
            else:
                job_args.append(arg)

        if node_args:
            flag = '--node-args='
            for node_arg in node_args:
                flag += '%s ' % node_arg
            job_args.append(flag.strip())

        job_config['args'] = job_args

        if image.get('testgrid_prefix') is not None:
            dashboard = '%s-%s-%s' % (image['testgrid_prefix'], fields[3],
                                      fields[4])
            annotations = prow_config.setdefault('annotations', {})
            annotations['testgrid-dashboards'] = dashboard
            tab_name = '%s-%s-%s' % (fields[3], fields[4], fields[5])
            annotations['testgrid-tab-name'] = tab_name

        return job_config, prow_config, None


class E2ETest:
    """Generates job + prow + testgrid configs for one e2e job name."""

    def __init__(self, output_dir, job_name, job, config):
        self.env_filename = os.path.join(output_dir, '%s.env' % job_name)
        self.job_name = job_name
        self.job = job
        self.common = config['common']
        self.cloud_providers = config['cloudProviders']
        self.images = config['images']
        self.k8s_versions = config['k8sVersions']
        self.test_suites = config['testSuites']

    def __get_job_def(self, args):
        """Returns the job definition from the given args."""
        return {
            'scenario': 'kubernetes_e2e',
            'args': args,
            'sigOwners': self.job.get('sigOwners') or ['UNNOWN'],
            # Indicates that this job definition is auto-generated.
            'tags': ['generated'],
            '_comment': COMMENT,
        }

    def __get_prow_config(self, test_suite):
        """Returns the Prow config for the e2e job from the given fields."""
        prow_config = yaml.load(PROW_CONFIG_TEMPLATE)
        prow_config['name'] = self.job_name
        # use cluster from test_suite, or job, or not at all
        if 'cluster' in test_suite:
            prow_config['cluster'] = test_suite['cluster']
        elif 'cluster' in self.job:
            prow_config['cluster'] = self.job['cluster']
        # use resources from test_suite, or job, or default
        if 'resources' in test_suite:
            prow_config['resources'] = test_suite['resources']
        elif 'resources' in self.job:
            prow_config['resources'] = self.job['resources']
        if 'interval' in self.job:
            del prow_config['cron']
            prow_config['interval'] = self.job['interval']
        elif 'cron' in self.job:
            del prow_config['interval']
            prow_config['cron'] = self.job['cron']
        else:
            raise Exception("no interval or cron definition found")

        # Assumes that the value in --timeout is of minutes.
        timeout = int(next(
            x[10:-1] for x in test_suite['args'] if (
                x.startswith('--timeout='))))

        container = prow_config['spec']['containers'][0]
        if not container['args']:
            container['args'] = []
        container['args'].append('--bare')
        # Prow timeout = job timeout + 20min
        container['args'].append('--timeout=%d' % (timeout + 20))
        return prow_config

    def __get_testgrid_config(self):
        tg_config = yaml.load(E2E_TESTGRID_CONFIG_TEMPLATE)
        tg_config['name'] = self.job_name
        tg_config['gcs_prefix'] = GCS_LOG_PREFIX + self.job_name
        return tg_config

    def initialize_dashboards_with_release_blocking_info(self, version):
        dashboards = []
        if self.job.get('releaseBlocking'):
            dashboards.append('sig-release-%s-blocking' % version)
        elif self.job.get('releaseInforming'):
            dashboards.append('sig-release-%s-informing' % version)
        else:
            dashboards.append('sig-release-generated')
        return dashboards

    def generate(self):
        '''Returns the job and the Prow configurations for this test.'''
        print(f'generating e2e job: {self.job_name}')
        fields = self.job_name.split('-')
        if len(fields) != 7:
            raise ValueError('Expected 7 fields in job name', self.job_name)

        cloud_provider = self.cloud_providers[fields[3]]
        image = self.images[fields[4]]
        k8s_version = self.k8s_versions[fields[5][3:]]
        test_suite = self.test_suites[fields[6]]

        # Generates args.
        args = []
        args.extend(get_args(self.job_name, self.common))
        args.extend(get_args(self.job_name, cloud_provider))
        args.extend(get_args(self.job_name, image))
        args.extend(get_args(self.job_name, k8s_version))
        args.extend(get_args(self.job_name, test_suite))
        # Generates job config.
        job_config = self.__get_job_def(args)
        # Generates Prow config.
        prow_config = self.__get_prow_config(test_suite)
        tg_config = self.__get_testgrid_config()

        annotations = prow_config.setdefault('annotations', {})
        tab_name = '%s-%s-%s-%s' % (fields[3], fields[4], fields[5], fields[6])
        annotations['testgrid-tab-name'] = tab_name
        dashboards = self.initialize_dashboards_with_release_blocking_info(k8s_version['version'])
        if image.get('testgrid_prefix') is not None:
            dashboard = '%s-%s-%s' % (image['testgrid_prefix'], fields[4],
                                      fields[5])
            dashboards.append(dashboard)
        annotations['testgrid-dashboards'] = ', '.join(dashboards)
        if 'testgridNumFailuresToAlert' in self.job:
            annotations['testgrid-num-failures-to-alert'] = ('%s' %
                                                             self.job['testgridNumFailuresToAlert'])

        return job_config, prow_config, tg_config


def for_each_job(output_dir, job_name, job, yaml_config):
    """Returns the job config and the Prow config for one test job."""
    fields = job_name.split('-')
    if len(fields) < 3:
        raise ValueError('Expected at least 3 fields in job name', job_name)
    job_type = fields[2]

    # Generates configurations.
    if job_type == 'e2e':
        generator = E2ETest(output_dir, job_name, job, yaml_config)
    elif job_type == 'e2enode':
        generator = E2ENodeTest(job_name, job, yaml_config)
    else:
        raise ValueError(f'Job {job_name} has unexpected job type ', job_type)
    job_config, prow_config, testgrid_config = generator.generate()

    # Applies job-level overrides.
    apply_job_overrides(job_config['args'], get_args(job_name, job))

    # merge job_config into prow_config
    args = prow_config['spec']['containers'][0]['args']
    args.append('--scenario=' + job_config['scenario'])
    args.append('--')
    args.extend(job_config['args'])

    return prow_config, testgrid_config


def main(yaml_config_path, output_dir, testgrid_output_path):
    """Creates test job definitions.

    Converts the test configurations in yaml_config_path to the job
    definitions in output_dir/generated.yaml.
    """
    # TODO(yguo0905): Validate the configurations from yaml_config_path.

    with open(yaml_config_path) as fp:
        yaml_config = yaml.load(fp)

    output_config = {}
    output_config['periodics'] = []
    testgrid_config = {'test_groups': []}
    job_names = sorted(yaml_config['jobs'].keys())
    for job_name in job_names:
        # Get the envs and args for each job defined under "jobs".
        prow, testgrid = for_each_job(
            output_dir, job_name, yaml_config['jobs'][job_name], yaml_config)
        output_config['periodics'].append(prow)
        if testgrid is not None:
            testgrid_config['test_groups'].append(testgrid)

    # Write the job definitions to --output-dir/generated.yaml
    # (os.path.join is robust against a missing trailing separator; plain
    # string concatenation silently produced '<dir>generated.yaml' then).
    write_prow_configs_file(os.path.join(output_dir, 'generated.yaml'), output_config)
    write_testgrid_config_file(testgrid_output_path, testgrid_config)


if __name__ == '__main__':
    PARSER = argparse.ArgumentParser(
        description='Create test definitions from the given yaml config')
    PARSER.add_argument('--yaml-config-path', help='Path to config.yaml')
    PARSER.add_argument(
        '--output-dir',
        help='Prowjob config output dir',
        default='config/jobs/kubernetes/generated/')
    PARSER.add_argument(
        '--testgrid-output-path',
        help='Path to testgrid output file',
        default='config/testgrids/generated-test-config.yaml')
    ARGS = PARSER.parse_args()

    main(
        ARGS.yaml_config_path,
        ARGS.output_dir,
        ARGS.testgrid_output_path)
# Copyright (c) 2007 Ruben Reifenberg # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ Besides the thread(s) reading the socket, there is a single worker thread which feeds the log server. To stop the threaded server process (Ctrl-C prbably won't work) use the specified module variables to regularly end it without log line loss, or kill the process. 
""" from __future__ import absolute_import # Py3Migration - required by Python 2.7 for socketserver import import sys import logging.handlers try: import socketserver as py_socketserver except ImportError: # Py3Migration import SocketServer as py_socketserver import struct import threading import collections import socket import datetime import time from rrlog.globalconst import remoteloads,warn from rrlog import globalconst maxlen_jobdataq = 100000 socketreceiver = None # .abort=True in the LogRecordSocketReceiver to exit rrlog_server = None jobdataq = collections.deque() # alt: Queue.Queue processq_stop = False # to exit the worker thread counter = 0 def _i_am_orphan(): # siehe rrlog.mail.py, wenn wirklich genutzt, kann sie zu rrlog.tool.i_am_orphan_thread werden current = threading.currentThread() for thread in threading.enumerate(): if not thread.isDaemon() and (thread is not current): return False return True def processq(): """ Loop which forever pops the oldest (left) element from the jobdataq and calls the log server. The global variable processq_stop=True ends the loop, but not before the jobdataq is found empty. """ while True: try: pickled = jobdataq.popleft() except IndexError: if processq_stop or _i_am_orphan(): return else: try: jobdata = remoteloads(pickled) except Exception as e: # no exit. there may be a process sending erroneously to me warn("serialization protocol error: deserialize jobdata failed; a job is skipped (%s)"%e) else: if "ping" == jobdata: # Someone wants to know whether I'm alive pass else: rrlog_server.log(jobdata) class LogRecordStreamHandler(py_socketserver.StreamRequestHandler): """Handler for a streaming logging request. This basically logs the record using whatever logging policy is configured locally. """ def handle(self): """ Handle multiple requests - each expected to be a 4-byte length, followed by the LogRecord in pickle format. Logs the record according to whatever policy is configured locally. 
""" #raise ValueError("merkt der Client das?") nein, der Client merkt nix, und blockiert auch nicht. overfull_warned = False # i've already emitted an overfull warning while True: chunk = self.connection.recv(4) if len(chunk) < 4: break slen = struct.unpack(">L", chunk)[0] try: chunk = self.connection.recv(slen) except MemoryError: # already seen when connecting an XMLRPC client by mistake ... sys.stderr.write("corrupt data, did you use the right socket client ?") raise while len(chunk) < slen: chunk = chunk + self.connection.recv(slen - len(chunk)) # obj = self.unPickle(chunk) # self.rrlog_server.log(obj) if len(jobdataq) <= maxlen_jobdataq: # deque of python2.6 has maxlen but that throws away oldest elements jobdataq.append(chunk) elif not overfull_warned: # this is not exact with multiple threads (e.g.ThreadedTCPServer). # But without harm because, when getting near the queue size limit, arbitrary jobs will be skipped in any case. warn("skipping job because queue len > %s. Subsequent warnings of that type are disabled."%maxlen_jobdataq) # may appear >1 times with >1 threads overfull_warned = True # there is also SocketServer.ThreadingTCPServer # but should a log really load multiple cpu cores ? # moreover, that would require the rotation to be threadsafe tcpservercls = py_socketserver.TCPServer class LogRecordSocketReceiver(tcpservercls): """simple TCP socket-based logging receiver suitable for testing. 
""" allow_reuse_address = 1 def __init__(self, host, ports, handler=LogRecordStreamHandler): for port in ports: try: tcpservercls.__init__(self, (host, port), handler) except socket.error as e: warn("port %s not available:%s"%(port, e)) port = None if not port: raise self.abort = 0 self.timeout = 1 self.logname = None self.host = host self.port = port def serve_until_stopped(self): import select abort = 0 while not abort: rd, wr, ex = select.select( [self.socket.fileno()], [], [], self.timeout ) if rd: self.handle_request() abort = self.abort def startServer(logServer, host="localhost", ports=(globalconst.DEFAULTPORT_SOCKET,)): """ Run the given logServer as an xmlrpc server (forever). :param ports: sequence of portnumbers, at least one number. The first port available is used. Multiple ports is for development, where sometimes ports may remain blocked. In production, better use a single port only, for best control over which server/client pairs are married. """ global socketreceiver global rrlog_server rrlog_server = logServer socketreceiver = LogRecordSocketReceiver(host, ports) # print("About to start TCP server...") import os t = threading.Thread(target=processq) t.start() print("%s:log server ready. Available at host,port: %s.Pid=%s, thread.ident=%s"%( datetime.datetime.now(), str( (socketreceiver.host, socketreceiver.port)), os.getpid(), t.ident )) socketreceiver.serve_until_stopped()
#!/usr/bin/env python3 # Copyright (c) 2010 ArtForz -- public domain half-a-node # Copyright (c) 2012 Jeff Garzik # Copyright (c) 2010-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Bitcoin P2P network half-a-node. This python code was modified from ArtForz' public domain half-a-node, as found in the mini-node branch of http://github.com/jgarzik/pynode. P2PConnection: A low-level connection object to a node's P2P interface P2PInterface: A high-level interface object for communicating to a node over P2P P2PDataStore: A p2p interface class that keeps a store of transactions and blocks and can respond correctly to getdata and getheaders messages""" import asyncio from collections import defaultdict from io import BytesIO import logging import struct import sys import threading from test_framework.messages import ( CBlockHeader, MIN_VERSION_SUPPORTED, msg_addr, msg_block, MSG_BLOCK, msg_blocktxn, msg_cmpctblock, msg_feefilter, msg_getaddr, msg_getblocks, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_mempool, msg_notfound, msg_ping, msg_pong, msg_sendcmpct, msg_sendheaders, msg_tx, MSG_TX, MSG_TYPE_MASK, msg_verack, msg_version, NODE_NETWORK, NODE_WITNESS, sha256, ) from test_framework.util import wait_until logger = logging.getLogger("TestFramework.mininode") MESSAGEMAP = { b"addr": msg_addr, b"block": msg_block, b"blocktxn": msg_blocktxn, b"cmpctblock": msg_cmpctblock, b"feefilter": msg_feefilter, b"getaddr": msg_getaddr, b"getblocks": msg_getblocks, b"getblocktxn": msg_getblocktxn, b"getdata": msg_getdata, b"getheaders": msg_getheaders, b"headers": msg_headers, b"inv": msg_inv, b"mempool": msg_mempool, b"notfound": msg_notfound, b"ping": msg_ping, b"pong": msg_pong, b"sendcmpct": msg_sendcmpct, b"sendheaders": msg_sendheaders, b"tx": msg_tx, b"verack": msg_verack, b"version": msg_version, } MAGIC_BYTES = { "mainnet": 
b"\xf9\xbe\xb4\xd9", # mainnet "testnet3": b"\x0b\x11\x09\x07", # testnet3 "regtest": b"\xfa\xbf\xb5\xda", # regtest } class P2PConnection(asyncio.Protocol): """A low-level connection object to a node's P2P interface. This class is responsible for: - opening and closing the TCP connection to the node - reading bytes from and writing bytes to the socket - deserializing and serializing the P2P message header - logging messages as they are sent and received This class contains no logic for handing the P2P message payloads. It must be sub-classed and the on_message() callback overridden.""" def __init__(self): # The underlying transport of the connection. # Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe self._transport = None @property def is_connected(self): return self._transport is not None def peer_connect(self, dstaddr, dstport, *, net): assert not self.is_connected self.dstaddr = dstaddr self.dstport = dstport # The initial message to send after the connection was made: self.on_connection_send_msg = None self.recvbuf = b"" self.magic_bytes = MAGIC_BYTES[net] logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport)) loop = NetworkThread.network_event_loop conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport) conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe) return conn_gen def peer_disconnect(self): # Connection could have already been closed by other end. 
NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort()) # Connection and disconnection methods def connection_made(self, transport): """asyncio callback when a connection is opened.""" assert not self._transport logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport)) self._transport = transport if self.on_connection_send_msg: self.send_message(self.on_connection_send_msg) self.on_connection_send_msg = None # Never used again self.on_open() def connection_lost(self, exc): """asyncio callback when a connection is closed.""" if exc: logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc)) else: logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport)) self._transport = None self.recvbuf = b"" self.on_close() # Socket read methods def data_received(self, t): """asyncio callback when data is read from the socket.""" if len(t) > 0: self.recvbuf += t self._on_data() def _on_data(self): """Try to read P2P messages from the recv buffer. This method reads data from the buffer in a loop. 
It deserializes, parses and verifies the P2P header, then passes the P2P payload to the on_message callback for processing.""" try: while True: if len(self.recvbuf) < 4: return if self.recvbuf[:4] != self.magic_bytes: raise ValueError("magic bytes mismatch: {} != {}".format(repr(self.magic_bytes), repr(self.recvbuf))) if len(self.recvbuf) < 4 + 12 + 4 + 4: return command = self.recvbuf[4:4+12].split(b"\x00", 1)[0] msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0] checksum = self.recvbuf[4+12+4:4+12+4+4] if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen: return msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen] th = sha256(msg) h = sha256(th) if checksum != h[:4]: raise ValueError("got bad checksum " + repr(self.recvbuf)) self.recvbuf = self.recvbuf[4+12+4+4+msglen:] if command not in MESSAGEMAP: raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg))) f = BytesIO(msg) t = MESSAGEMAP[command]() t.deserialize(f) self._log_message("receive", t) self.on_message(t) except Exception as e: logger.exception('Error reading message:', repr(e)) raise def on_message(self, message): """Callback for processing a P2P payload. Must be overridden by derived class.""" raise NotImplementedError # Socket write methods def send_message(self, message): """Send a P2P message over the socket. 
This method takes a P2P payload, builds the P2P header and adds the message to the send buffer to be sent over the socket.""" tmsg = self.build_message(message) self._log_message("send", message) return self.send_raw_message(tmsg) def send_raw_message(self, raw_message_bytes): if not self.is_connected: raise IOError('Not connected') def maybe_write(): if not self._transport: return if self._transport.is_closing(): return self._transport.write(raw_message_bytes) NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write) # Class utility methods def build_message(self, message): """Build a serialized P2P message""" command = message.command data = message.serialize() tmsg = self.magic_bytes tmsg += command tmsg += b"\x00" * (12 - len(command)) tmsg += struct.pack("<I", len(data)) th = sha256(data) h = sha256(th) tmsg += h[:4] tmsg += data return tmsg def _log_message(self, direction, msg): """Logs a message being sent or received over the connection.""" if direction == "send": log_message = "Send message to " elif direction == "receive": log_message = "Received message from " log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500]) if len(log_message) > 500: log_message += "... (msg truncated)" logger.debug(log_message) class P2PInterface(P2PConnection): """A high-level P2P interface class for communicating with a Bitcoin node. This class provides high-level callbacks for processing P2P message payloads, as well as convenience methods for interacting with the node over P2P. 
    Individual testcases should subclass this and override the on_* methods
    if they want to alter message handling behaviour."""

    def __init__(self):
        super().__init__()

        # Track number of messages of each type received and the most recent
        # message of each type
        self.message_count = defaultdict(int)
        self.last_message = {}

        # A count of the number of ping messages we've sent to the node
        self.ping_counter = 1

        # The network services received from the peer
        self.nServices = 0

    def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
        create_conn = super().peer_connect(*args, **kwargs)

        if send_version:
            # Send a version msg
            vt = msg_version()
            vt.nServices = services
            vt.addrTo.ip = self.dstaddr
            vt.addrTo.port = self.dstport
            vt.addrFrom.ip = "0.0.0.0"
            vt.addrFrom.port = 0
            self.on_connection_send_msg = vt  # Will be sent soon after connection_made

        return create_conn

    # Message receiving methods

    def on_message(self, message):
        """Receive message and dispatch message to appropriate callback.

        We keep a count of how many of each message type has been
        received and the most recent message of each type."""
        with mininode_lock:
            try:
                command = message.command.decode('ascii')
                self.message_count[command] += 1
                self.last_message[command] = message
                getattr(self, 'on_' + command)(message)
            except:
                # NOTE(review): bare except also catches BaseException
                # (KeyboardInterrupt/SystemExit); nothing is swallowed since
                # it re-raises, but `except Exception:` would be conventional.
                print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
                raise

    # Callback methods. Can be overridden by subclasses in individual test
    # cases to provide custom message handling behaviour.

    def on_open(self):
        pass

    def on_close(self):
        pass

    # Default no-op handlers; subclasses override the ones they care about.
    def on_addr(self, message): pass
    def on_block(self, message): pass
    def on_blocktxn(self, message): pass
    def on_cmpctblock(self, message): pass
    def on_feefilter(self, message): pass
    def on_getaddr(self, message): pass
    def on_getblocks(self, message): pass
    def on_getblocktxn(self, message): pass
    def on_getdata(self, message): pass
    def on_getheaders(self, message): pass
    def on_headers(self, message): pass
    def on_mempool(self, message): pass
    def on_notfound(self, message): pass
    def on_pong(self, message): pass
    def on_reject(self, message): pass
    def on_sendcmpct(self, message): pass
    def on_sendheaders(self, message): pass
    def on_tx(self, message): pass

    def on_inv(self, message):
        # Request every advertised object (type 0 == ERROR entries skipped).
        want = msg_getdata()
        for i in message.inv:
            if i.type != 0:
                want.inv.append(i)
        if len(want.inv):
            self.send_message(want)

    def on_ping(self, message):
        self.send_message(msg_pong(message.nonce))

    def on_verack(self, message):
        pass

    def on_version(self, message):
        assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
        self.send_message(msg_verack())
        self.nServices = message.nServices

    # Connection helper methods

    def wait_for_disconnect(self, timeout=60):
        test_function = lambda: not self.is_connected
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    # Message receiving helper methods

    def wait_for_tx(self, txid, timeout=60):
        def test_function():
            assert self.is_connected
            if not self.last_message.get('tx'):
                return False
            return self.last_message['tx'].tx.rehash() == txid

        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_block(self, blockhash, timeout=60):
        def test_function():
            assert self.is_connected
            return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash

        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_header(self, blockhash, timeout=60):
        def test_function():
            assert self.is_connected
            last_headers = self.last_message.get('headers')
            if not last_headers:
                return False
            # Only the first header of the most recent headers message is checked.
            return last_headers.headers[0].rehash() == blockhash

        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_getdata(self, timeout=60):
        """Waits for a getdata message.

        Receiving any getdata message will satisfy the predicate. the last_message["getdata"]
        value must be explicitly cleared before calling this method, or this will return
        immediately with success. TODO: change this method to take a hash value and only
        return true if the correct block/tx has been requested."""
        def test_function():
            assert self.is_connected
            return self.last_message.get("getdata")

        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_getheaders(self, timeout=60):
        """Waits for a getheaders message.

        Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"]
        value must be explicitly cleared before calling this method, or this will return
        immediately with success. TODO: change this method to take a hash value and only
        return true if the correct block header has been requested."""
        def test_function():
            assert self.is_connected
            return self.last_message.get("getheaders")

        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_inv(self, expected_inv, timeout=60):
        """Waits for an INV message and checks that the first inv object in the message was as expected."""
        if len(expected_inv) > 1:
            raise NotImplementedError("wait_for_inv() will only verify the first inv object")

        def test_function():
            assert self.is_connected
            return self.last_message.get("inv") and \
                self.last_message["inv"].inv[0].type == expected_inv[0].type and \
                self.last_message["inv"].inv[0].hash == expected_inv[0].hash

        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_verack(self, timeout=60):
        def test_function():
            return self.message_count["verack"]

        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    # Message sending helper functions

    def send_and_ping(self, message, timeout=60):
        self.send_message(message)
        self.sync_with_ping(timeout=timeout)

    # Sync up with the node
    def sync_with_ping(self, timeout=60):
        # Each ping carries a fresh nonce so a stale pong can't satisfy it.
        self.send_message(msg_ping(nonce=self.ping_counter))

        def test_function():
            assert self.is_connected
            return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter

        wait_until(test_function, timeout=timeout, lock=mininode_lock)
        self.ping_counter += 1


# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = threading.RLock()


class NetworkThread(threading.Thread):
    # Class-level singleton event loop shared by every P2PConnection.
    network_event_loop = None

    def __init__(self):
        super().__init__(name="NetworkThread")
        # There is only one event loop and no more than one thread must be created
        assert not self.network_event_loop

        NetworkThread.network_event_loop = asyncio.new_event_loop()

    def run(self):
        """Start the network thread."""
        self.network_event_loop.run_forever()

    def close(self, timeout=10):
        """Close the connections and network event loop."""
        self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
        wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
        self.network_event_loop.close()
        self.join(timeout)
        # Safe to remove event loop.
        NetworkThread.network_event_loop = None


class P2PDataStore(P2PInterface):
    """A P2P data store class.

    Keeps a block and transaction store and responds correctly to getdata
    and getheaders requests."""

    def __init__(self):
        super().__init__()
        # store of blocks. key is block hash, value is a CBlock object
        self.block_store = {}
        self.last_block_hash = ''
        # store of txs. key is txid, value is a CTransaction object
        self.tx_store = {}
        self.getdata_requests = []

    def on_getdata(self, message):
        """Check for the tx/block in our stores and if found, reply with an inv message."""
        for inv in message.inv:
            self.getdata_requests.append(inv.hash)
            if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
                self.send_message(msg_tx(self.tx_store[inv.hash]))
            elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
                self.send_message(msg_block(self.block_store[inv.hash]))
            else:
                logger.debug('getdata message type {} received.'.format(hex(inv.type)))

    def on_getheaders(self, message):
        """Search back through our block store for the locator, and reply with a headers message if found."""
        locator, hash_stop = message.locator, message.hashstop

        # Assume that the most recent block added is the tip
        if not self.block_store:
            return

        headers_list = [self.block_store[self.last_block_hash]]
        maxheaders = 2000
        while headers_list[-1].sha256 not in locator.vHave:
            # Walk back through the block store, adding headers to headers_list
            # as we go.
            prev_block_hash = headers_list[-1].hashPrevBlock
            if prev_block_hash in self.block_store:
                prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
                headers_list.append(prev_block_header)
                if prev_block_header.sha256 == hash_stop:
                    # if this is the hashstop header, stop here
                    break
            else:
                logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
                break

        # Truncate the list if there are too many headers
        # (keeps the newest maxheaders entries and reverses to oldest-first).
        headers_list = headers_list[:-maxheaders - 1:-1]
        response = msg_headers(headers_list)

        if response is not None:
            self.send_message(response)

    def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
        """Send blocks to test node and test whether the tip advances.

         - add all blocks to our block_store
         - send a headers message for the final block
         - the on_getheaders handler will ensure that any getheaders are responded to
         - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
           ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
         - if success is True: assert that the node's tip advances to the most recent block
         - if success is False: assert that the node's tip doesn't advance
         - if reject_reason is set: assert that the correct reject message is logged"""

        with mininode_lock:
            for block in blocks:
                self.block_store[block.sha256] = block
                self.last_block_hash = block.sha256

        reject_reason = [reject_reason] if reject_reason else []
        with node.assert_debug_log(expected_msgs=reject_reason):
            if force_send:
                for b in blocks:
                    self.send_message(msg_block(block=b))
            else:
                self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
                wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)

            if expect_disconnect:
                self.wait_for_disconnect(timeout=timeout)
            else:
                self.sync_with_ping(timeout=timeout)

            if success:
                wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
            else:
                assert node.getbestblockhash() != blocks[-1].hash

    def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
        """Send txs to test node and test whether they're accepted to the mempool.

         - add all txs to our tx_store
         - send tx messages for all txs
         - if success is True/False: assert that the txs are/are not accepted to the mempool
         - if expect_disconnect is True: Skip the sync with ping
         - if reject_reason is set: assert that the correct reject message is logged."""

        with mininode_lock:
            for tx in txs:
                self.tx_store[tx.sha256] = tx

        reject_reason = [reject_reason] if reject_reason else []
        with node.assert_debug_log(expected_msgs=reject_reason):
            for tx in txs:
                self.send_message(msg_tx(tx))

            if expect_disconnect:
                self.wait_for_disconnect()
            else:
                self.sync_with_ping()

            raw_mempool = node.getrawmempool()
            if success:
                # Check that all txs are now in the mempool
                for tx in txs:
                    assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
            else:
                # Check that none of the txs are now in the mempool
                for tx in txs:
                    assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Notes: # # This generates makefiles suitable for inclusion into the Android build system # via an Android.mk file. It is based on make.py, the standard makefile # generator. # # The code below generates a separate .mk file for each target, but # all are sourced by the top-level GypAndroid.mk. This means that all # variables in .mk-files clobber one another, and furthermore that any # variables set potentially clash with other Android build system variables. # Try to avoid setting global variables where possible. import gyp import gyp.common import gyp.generator.make as make # Reuse global functions from make backend. import os import re import subprocess generator_default_variables = { 'OS': 'android', 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '', 'STATIC_LIB_PREFIX': 'lib', 'SHARED_LIB_PREFIX': 'lib', 'STATIC_LIB_SUFFIX': '.a', 'SHARED_LIB_SUFFIX': '.so', 'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)', 'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)', 'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)', 'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)', 'LIB_DIR': '$(obj).$(TOOLSET)', 'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python. 'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python. 'RULE_INPUT_PATH': '$(RULE_SOURCES)', 'RULE_INPUT_EXT': '$(suffix $<)', 'RULE_INPUT_NAME': '$(notdir $<)', 'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)', } # Make supports multiple toolsets generator_supports_multiple_toolsets = True # Generator-specific gyp specs. generator_additional_non_configuration_keys = [ # Boolean to declare that this target does not want its name mangled. 'android_unmangled_name', # Map of android build system variables to set. 
'aosp_build_settings', ] generator_additional_path_sections = [] generator_extra_sources_for_rules = [] ALL_MODULES_FOOTER = """\ # "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from # all the included sub-makefiles. This is just here to clarify. gyp_all_modules: """ header = """\ # This file is generated by gyp; do not edit. """ # Map gyp target types to Android module classes. MODULE_CLASSES = { 'static_library': 'STATIC_LIBRARIES', 'shared_library': 'SHARED_LIBRARIES', 'executable': 'EXECUTABLES', } def IsCPPExtension(ext): return make.COMPILABLE_EXTENSIONS.get(ext) == 'cxx' def Sourceify(path): """Convert a path to its source directory form. The Android backend does not support options.generator_output, so this function is a noop.""" return path # Map from qualified target to path to output. # For Android, the target of these maps is a tuple ('static', 'modulename'), # ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string, # since we link by module. target_outputs = {} # Map from qualified target to any linkable output. A subset # of target_outputs. E.g. when mybinary depends on liba, we want to # include liba in the linker line; when otherbinary depends on # mybinary, we just want to build mybinary first. target_link_deps = {} class AndroidMkWriter(object): """AndroidMkWriter packages up the writing of one target-specific Android.mk. Its only real entry point is Write(), and is mostly used for namespacing. """ def __init__(self, android_top_dir): self.android_top_dir = android_top_dir def Write(self, qualified_target, relative_target, base_path, output_filename, spec, configs, part_of_all, write_alias_target, sdk_version): """The main entry point: writes a .mk file for a single target. 
Arguments: qualified_target: target we're generating relative_target: qualified target name relative to the root base_path: path relative to source root we're building in, used to resolve target-relative paths output_filename: output .mk file name to write spec, configs: gyp info part_of_all: flag indicating this target is part of 'all' write_alias_target: flag indicating whether to create short aliases for this target sdk_version: what to emit for LOCAL_SDK_VERSION in output """ gyp.common.EnsureDirExists(output_filename) self.fp = open(output_filename, 'w') self.fp.write(header) self.qualified_target = qualified_target self.relative_target = relative_target self.path = base_path self.target = spec['target_name'] self.type = spec['type'] self.toolset = spec['toolset'] deps, link_deps = self.ComputeDeps(spec) # Some of the generation below can add extra output, sources, or # link dependencies. All of the out params of the functions that # follow use names like extra_foo. extra_outputs = [] extra_sources = [] self.android_class = MODULE_CLASSES.get(self.type, 'GYP') self.android_module = self.ComputeAndroidModule(spec) (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec) self.output = self.output_binary = self.ComputeOutput(spec) # Standard header. self.WriteLn('include $(CLEAR_VARS)\n') # Module class and name. self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class) self.WriteLn('LOCAL_MODULE := ' + self.android_module) # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE. # The library module classes fail if the stem is set. ComputeOutputParts # makes sure that stem == modulename in these cases. 
if self.android_stem != self.android_module: self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem) self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix) if self.toolset == 'host': self.WriteLn('LOCAL_IS_HOST_MODULE := true') self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)') else: self.WriteLn('LOCAL_MODULE_TARGET_ARCH := ' '$(TARGET_$(GYP_VAR_PREFIX)ARCH)') self.WriteLn('LOCAL_SDK_VERSION := %s' % sdk_version) # Grab output directories; needed for Actions and Rules. if self.toolset == 'host': self.WriteLn('gyp_intermediate_dir := ' '$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))') else: self.WriteLn('gyp_intermediate_dir := ' '$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))') self.WriteLn('gyp_shared_intermediate_dir := ' '$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))') self.WriteLn() # List files this target depends on so that actions/rules/copies/sources # can depend on the list. # TODO: doesn't pull in things through transitive link deps; needed? target_dependencies = [x[1] for x in deps if x[0] == 'path'] self.WriteLn('# Make sure our deps are built first.') self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES', local_pathify=True) # Actions must come first, since they can generate more OBJs for use below. if 'actions' in spec: self.WriteActions(spec['actions'], extra_sources, extra_outputs) # Rules must be early like actions. if 'rules' in spec: self.WriteRules(spec['rules'], extra_sources, extra_outputs) if 'copies' in spec: self.WriteCopies(spec['copies'], extra_outputs) # GYP generated outputs. self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True) # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend # on both our dependency targets and our generated files. self.WriteLn('# Make sure our deps and generated files are built first.') self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) ' '$(GYP_GENERATED_OUTPUTS)') self.WriteLn() # Sources. 
    if spec.get('sources', []) or extra_sources:
      self.WriteSources(spec, configs, extra_sources)

    self.WriteTarget(spec, configs, deps, link_deps, part_of_all,
                     write_alias_target)

    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = ('path', self.output_binary)

    # Update global list of link dependencies.
    if self.type == 'static_library':
      target_link_deps[qualified_target] = ('static', self.android_module)
    elif self.type == 'shared_library':
      target_link_deps[qualified_target] = ('shared', self.android_module)

    # The per-target .mk file is complete; the module name is returned so the
    # caller (GenerateOutput) can detect duplicate Android module names.
    self.fp.close()
    return self.android_module

  def WriteActions(self, actions, extra_sources, extra_outputs):
    """Write Makefile code for any 'actions' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   actions (used to make other pieces dependent on these
                   actions)
    """
    for action in actions:
      name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                      action['action_name']))
      self.WriteLn('### Rules for action "%s":' % action['action_name'])
      inputs = action['inputs']
      outputs = action['outputs']

      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      # NOTE(review): 'dir' shadows the builtin of the same name; kept as-is.
      dirs = set()
      for out in outputs:
        if not out.startswith('$'):
          print ('WARNING: Action for target "%s" writes output to local path '
                 '"%s".' % (self.target, out))
        dir = os.path.split(out)[0]
        if dir:
          dirs.add(dir)
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += outputs

      # Prepare the actual command.
      command = gyp.common.EncodePOSIXShellList(action['action'])
      if 'message' in action:
        quiet_cmd = 'Gyp action: %s ($@)' % action['message']
      else:
        quiet_cmd = 'Gyp action: %s ($@)' % name
      if len(dirs) > 0:
        # NOTE(review): 'dirs' is a set, so the joined mkdir arguments come out
        # in arbitrary hash order — harmless to make, but the emitted .mk text
        # is not guaranteed byte-stable across runs; confirm acceptable.
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

      # gyp actions run relative to the directory containing their .gyp file.
      cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
      command = cd_action + command

      # The makefile rules are all relative to the top dir, but the gyp actions
      # are defined relative to their containing dir.  This replaces the gyp_*
      # variables for the action rule with an absolute version so that the
      # output goes in the right place.
      # Only write the gyp_* rules for the "primary" output (:1);
      # it's superfluous for the "extra outputs", and this avoids accidentally
      # writing duplicate dummy rules for those outputs.
      main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
      self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
      self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
      self.WriteLn('%s: gyp_intermediate_dir := '
                   '$(abspath $(gyp_intermediate_dir))' % main_output)
      self.WriteLn('%s: gyp_shared_intermediate_dir := '
                   '$(abspath $(gyp_shared_intermediate_dir))' % main_output)

      # Android's envsetup.sh adds a number of directories to the path including
      # the built host binary directory. This causes actions/rules invoked by
      # gyp to sometimes use these instead of system versions, e.g. bison.
      # The built host binaries may not be suitable, and can cause errors.
      # So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
      # set by envsetup.
      self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
                   % main_output)

      # Don't allow spaces in input/output filenames, but make an exception for
      # filenames which start with '$(' since it's okay for there to be spaces
      # inside of make function/macro invocations.
      # NOTE(review): 'input' shadows the builtin of the same name; kept as-is.
      for input in inputs:
        if not input.startswith('$(') and ' ' in input:
          raise gyp.common.GypError(
              'Action input filename "%s" in target %s contains a space' %
              (input, self.target))
      for output in outputs:
        if not output.startswith('$(') and ' ' in output:
          raise gyp.common.GypError(
              'Action output filename "%s" in target %s contains a space' %
              (output, self.target))

      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                   (main_output, ' '.join(map(self.LocalPathify, inputs))))
      self.WriteLn('\t@echo "%s"' % quiet_cmd)
      self.WriteLn('\t$(hide)%s\n' % command)
      for output in outputs[1:]:
        # Make each output depend on the main output, with an empty command
        # to force make to notice that the mtime has changed.
        self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))

      extra_outputs += outputs
      self.WriteLn()

    self.WriteLn()

  def WriteRules(self, rules, extra_sources, extra_outputs):
    """Write Makefile code for any 'rules' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   rules (used to make other pieces dependent on these rules)
    """
    if len(rules) == 0:
      return

    for rule in rules:
      if len(rule.get('rule_sources', [])) == 0:
        continue
      name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                      rule['rule_name']))
      self.WriteLn('\n### Generated for rule "%s":' % name)
      self.WriteLn('# "%s":' % rule)

      inputs = rule.get('inputs')
      # One make rule is emitted per rule_source; %(INPUT_ROOT)s and
      # %(INPUT_DIRNAME)s placeholders in outputs/action are expanded per file.
      for rule_source in rule.get('rule_sources', []):
        (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            os.path.splitext(rule_source_basename)

        outputs = [self.ExpandInputRoot(out, rule_source_root,
                                        rule_source_dirname)
                   for out in rule['outputs']]

        dirs = set()
        for out in outputs:
          if not out.startswith('$'):
            print ('WARNING: Rule for target %s writes output to local path %s'
                   % (self.target, out))
          dir = os.path.dirname(out)
          if dir:
            dirs.add(dir)
        extra_outputs += outputs
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources.extend(outputs)

        components = []
        for component in rule['action']:
          component = self.ExpandInputRoot(component, rule_source_root,
                                           rule_source_dirname)
          if '$(RULE_SOURCES)' in component:
            component = component.replace('$(RULE_SOURCES)',
                                          rule_source)
          components.append(component)

        command = gyp.common.EncodePOSIXShellList(components)
        cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
        command = cd_action + command
        if dirs:
          # NOTE(review): set iteration order — same caveat as in WriteActions.
          command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

        # We set up a rule to build the first output, and then set up
        # a rule for each additional output to depend on the first.
        outputs = map(self.LocalPathify, outputs)
        main_output = outputs[0]
        self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
        self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
        self.WriteLn('%s: gyp_intermediate_dir := '
                     '$(abspath $(gyp_intermediate_dir))' % main_output)
        self.WriteLn('%s: gyp_shared_intermediate_dir := '
                     '$(abspath $(gyp_shared_intermediate_dir))' % main_output)

        # See explanation in WriteActions.
        self.WriteLn('%s: export PATH := '
                     '$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)

        main_output_deps = self.LocalPathify(rule_source)
        if inputs:
          main_output_deps += ' '
          main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])

        self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                     (main_output, main_output_deps))
        self.WriteLn('\t%s\n' % command)
        for output in outputs[1:]:
          # Make each output depend on the main output, with an empty command
          # to force make to notice that the mtime has changed.
          self.WriteLn('%s: %s ;' % (output, main_output))
        self.WriteLn()

    self.WriteLn()

  def WriteCopies(self, copies, extra_outputs):
    """Write Makefile code for any 'copies' from the gyp input.

    extra_outputs: a list that will be filled in with any outputs of this
                   action (used to make other pieces dependent on this action)
    """
    self.WriteLn('### Generated for copy rule.')

    variable = make.StringToMakefileVariable(self.relative_target + '_copies')
    outputs = []
    for copy in copies:
      for path in copy['files']:
        # The Android build system does not allow generation of files into the
        # source tree. The destination should start with a variable, which will
        # typically be $(gyp_intermediate_dir) or
        # $(gyp_shared_intermediate_dir). Note that we can't use an assertion
        # because some of the gyp tests depend on this.
        if not copy['destination'].startswith('$'):
          print ('WARNING: Copy rule for target %s writes output to '
                 'local path %s' % (self.target, copy['destination']))

        # LocalPathify() calls normpath, stripping trailing slashes.
        path = Sourceify(self.LocalPathify(path))
        filename = os.path.split(path)[1]
        output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
                                                          filename)))

        # $(ACP) is Android's acp copy tool; it is an order-only prerequisite.
        self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
                     (output, path))
        self.WriteLn('\t@echo Copying: $@')
        self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
        self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
        self.WriteLn()
        outputs.append(output)
    self.WriteLn('%s = %s' % (variable,
                              ' '.join(map(make.QuoteSpaces, outputs))))
    extra_outputs.append('$(%s)' % variable)
    self.WriteLn()

  def WriteSourceFlags(self, spec, configs):
    """Write out the flags and include paths used to compile source files for
    the current target.

    Args:
      spec, configs: input from gyp.
    """
    for configname, config in sorted(configs.iteritems()):
      extracted_includes = []

      self.WriteLn('\n# Flags passed to both C and C++ files.')
      cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
          config.get('cflags', []) + config.get('cflags_c', []))
      extracted_includes.extend(includes_from_cflags)
      self.WriteList(cflags, 'MY_CFLAGS_%s' % configname)

      self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname,
                     prefix='-D', quoter=make.EscapeCppDefine)

      self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
      includes = list(config.get('include_dirs', []))
      includes.extend(extracted_includes)
      includes = map(Sourceify, map(self.LocalPathify, includes))
      includes = self.NormalizeIncludePaths(includes)
      self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname)

      self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
      self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname)

    # The per-configuration variables written above are selected at build time
    # via $(GYP_CONFIGURATION).
    self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '
                 '$(MY_DEFS_$(GYP_CONFIGURATION))')
    # Undefine ANDROID for host modules
    # TODO: the source code should not use macro ANDROID to tell if it's host
    # or target module.
    if self.toolset == 'host':
      self.WriteLn('# Undefine ANDROID for host modules')
      self.WriteLn('LOCAL_CFLAGS += -UANDROID')
    self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
                 '$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))')
    self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))')
    # Android uses separate flags for assembly file invocations, but gyp expects
    # the same CFLAGS to be applied:
    self.WriteLn('LOCAL_ASFLAGS := $(LOCAL_CFLAGS)')

  def WriteSources(self, spec, configs, extra_sources):
    """Write Makefile code for any 'sources' from the gyp input.
    These are source files necessary to build the current target.
    We need to handle shared_intermediate directory source files as
    a special case by copying them to the intermediate directory and
    treating them as generated sources. Otherwise the Android build
    rules won't pick them up.

    Args:
      spec, configs: input from gyp.
      extra_sources: Sources generated from Actions or Rules.
    """
    sources = filter(make.Compilable, spec.get('sources', []))
    generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
    extra_sources = filter(make.Compilable, extra_sources)

    # Determine and output the C++ extension used by these sources.
    # We simply find the first C++ file and use that extension.
    all_sources = sources + extra_sources
    local_cpp_extension = '.cpp'
    for source in all_sources:
      (root, ext) = os.path.splitext(source)
      if IsCPPExtension(ext):
        local_cpp_extension = ext
        break
    if local_cpp_extension != '.cpp':
      self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)

    # We need to move any non-generated sources that are coming from the
    # shared intermediate directory out of LOCAL_SRC_FILES and put them
    # into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
    # that don't match our local_cpp_extension, since Android will only
    # generate Makefile rules for a single LOCAL_CPP_EXTENSION.
    local_files = []
    for source in sources:
      (root, ext) = os.path.splitext(source)
      if '$(gyp_shared_intermediate_dir)' in source:
        extra_sources.append(source)
      elif '$(gyp_intermediate_dir)' in source:
        extra_sources.append(source)
      elif IsCPPExtension(ext) and ext != local_cpp_extension:
        extra_sources.append(source)
      else:
        local_files.append(os.path.normpath(os.path.join(self.path, source)))

    # For any generated source, if it is coming from the shared intermediate
    # directory then we add a Make rule to copy them to the local intermediate
    # directory first. This is because the Android LOCAL_GENERATED_SOURCES
    # must be in the local module intermediate directory for the compile rules
    # to work properly. If the file has the wrong C++ extension, then we add
    # a rule to copy that to intermediates and use the new version.
    final_generated_sources = []
    # If a source file gets copied, we still need to add the original source
    # directory as header search path, for GCC searches headers in the
    # directory that contains the source file by default.
    origin_src_dirs = []
    for source in extra_sources:
      local_file = source
      if not '$(gyp_intermediate_dir)/' in local_file:
        basename = os.path.basename(local_file)
        local_file = '$(gyp_intermediate_dir)/' + basename
      (root, ext) = os.path.splitext(local_file)
      if IsCPPExtension(ext) and ext != local_cpp_extension:
        local_file = root + local_cpp_extension
      if local_file != source:
        # Emit the copy rule moving the generated file into the module's own
        # intermediate directory (and renaming its extension if needed).
        self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
        self.WriteLn('\tmkdir -p $(@D); cp $< $@')
        origin_src_dirs.append(os.path.dirname(source))
      final_generated_sources.append(local_file)

    # We add back in all of the non-compilable stuff to make sure that the
    # make rules have dependencies on them.
    final_generated_sources.extend(generated_not_sources)
    self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')

    origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
    origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
    self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')

    self.WriteList(local_files, 'LOCAL_SRC_FILES')

    # Write out the flags used to compile the source; this must be done last
    # so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
    self.WriteSourceFlags(spec, configs)

  def ComputeAndroidModule(self, spec):
    """Return the Android module name used for a gyp spec.

    We use the complete qualified target name to avoid collisions
    between duplicate targets in different directories. We also add a
    suffix to distinguish gyp-generated module names.
    """

    if int(spec.get('android_unmangled_name', 0)):
      assert self.type != 'shared_library' or self.target.startswith('lib')
      return self.target

    if self.type == 'shared_library':
      # For reasons of convention, the Android build system requires that all
      # shared library modules are named 'libfoo' when generating -l flags.
      prefix = 'lib_'
    else:
      prefix = ''

    if spec['toolset'] == 'host':
      suffix = '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp'
    else:
      suffix = '_gyp'

    if self.path:
      middle = make.StringToMakefileVariable('%s_%s' % (self.path, self.target))
    else:
      middle = make.StringToMakefileVariable(self.target)

    return ''.join([prefix, middle, suffix])

  def ComputeOutputParts(self, spec):
    """Return the 'output basename' of a gyp spec, split into filename + ext.

    Android libraries must be named the same thing as their module name,
    otherwise the linker can't find them, so product_name and so on must be
    ignored if we are building a library, and the "lib" prepending is
    not done for Android.
    """
    assert self.type != 'loadable_module' # TODO: not supported?

    target = spec['target_name']
    target_prefix = ''
    target_ext = ''
    if self.type == 'static_library':
      target = self.ComputeAndroidModule(spec)
      target_ext = '.a'
    elif self.type == 'shared_library':
      target = self.ComputeAndroidModule(spec)
      target_ext = '.so'
    elif self.type == 'none':
      target_ext = '.stamp'
    elif self.type != 'executable':
      print ("ERROR: What output file should be generated?",
             "type", self.type, "target", target)

    # product_prefix/product_name/product_extension only apply to
    # non-library targets (see docstring above).
    if self.type != 'static_library' and self.type != 'shared_library':
      target_prefix = spec.get('product_prefix', target_prefix)
      target = spec.get('product_name', target)
      product_ext = spec.get('product_extension')
      if product_ext:
        target_ext = '.' + product_ext

    target_stem = target_prefix + target
    return (target_stem, target_ext)

  def ComputeOutputBasename(self, spec):
    """Return the 'output basename' of a gyp spec.

    E.g., the loadable module 'foobar' in directory 'baz' will produce
      'libfoobar.so'
    """
    return ''.join(self.ComputeOutputParts(spec))

  def ComputeOutput(self, spec):
    """Return the 'output' (full output path) of a gyp spec.

    E.g., the loadable module 'foobar' in directory 'baz' will produce
      '$(obj)/baz/libfoobar.so'
    """
    if self.type == 'executable':
      # We install host executables into shared_intermediate_dir so they can be
      # run by gyp rules that refer to PRODUCT_DIR.
      path = '$(gyp_shared_intermediate_dir)'
    elif self.type == 'shared_library':
      if self.toolset == 'host':
        path = '$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)'
      else:
        path = '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)'
    else:
      # Other targets just get built into their intermediate dir.
      if self.toolset == 'host':
        path = ('$(call intermediates-dir-for,%s,%s,true,,'
                '$(GYP_HOST_VAR_PREFIX))' % (self.android_class,
                                             self.android_module))
      else:
        path = ('$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))'
                % (self.android_class, self.android_module))

    assert spec.get('product_dir') is None # TODO: not supported?
    return os.path.join(path, self.ComputeOutputBasename(spec))

  def NormalizeIncludePaths(self, include_paths):
    """ Normalize include_paths.
    Convert absolute paths to relative to the Android top directory.

    Args:
      include_paths: A list of unprocessed include paths.
    Returns:
      A list of normalized include paths.
    """
    normalized = []
    for path in include_paths:
      if path[0] == '/':
        path = gyp.common.RelativePath(path, self.android_top_dir)
      normalized.append(path)
    return normalized

  def ExtractIncludesFromCFlags(self, cflags):
    """Extract includes "-I..." out from cflags

    Args:
      cflags: A list of compiler flags, which may be mixed with "-I.."
    Returns:
      A tuple of lists: (clean_cflags, include_paths). "-I.." is trimmed.
    """
    clean_cflags = []
    include_paths = []
    for flag in cflags:
      if flag.startswith('-I'):
        include_paths.append(flag[2:])
      else:
        clean_cflags.append(flag)

    return (clean_cflags, include_paths)

  def FilterLibraries(self, libraries):
    """Filter the 'libraries' key to separate things that shouldn't be ldflags.

    Library entries that look like filenames should be converted to android
    module names instead of being passed to the linker as flags.

    Args:
      libraries: the value of spec.get('libraries')
    Returns:
      A tuple (static_lib_modules, dynamic_lib_modules, ldflags)
    """
    static_lib_modules = []
    dynamic_lib_modules = []
    ldflags = []
    for libs in libraries:
      # Libs can have multiple words.
      for lib in libs.split():
        # Filter the system libraries, which are added by default by the Android
        # build system.
        if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
            lib.endswith('libgcc.a')):
          continue
        # Foo.a / foo.so become module names (basename without extension).
        match = re.search(r'([^/]+)\.a$', lib)
        if match:
          static_lib_modules.append(match.group(1))
          continue
        match = re.search(r'([^/]+)\.so$', lib)
        if match:
          dynamic_lib_modules.append(match.group(1))
          continue
        if lib.startswith('-l'):
          ldflags.append(lib)
    return (static_lib_modules, dynamic_lib_modules, ldflags)

  def ComputeDeps(self, spec):
    """Compute the dependencies of a gyp spec.

    Returns a tuple (deps, link_deps), where each is a list of
    filenames that will need to be put in front of make for either
    building (deps) or linking (link_deps).
    """
    deps = []
    link_deps = []
    if 'dependencies' in spec:
      deps.extend([target_outputs[dep] for dep in spec['dependencies']
                   if target_outputs[dep]])
      for dep in spec['dependencies']:
        if dep in target_link_deps:
          link_deps.append(target_link_deps[dep])
      deps.extend(link_deps)
    return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))

  def WriteTargetFlags(self, spec, configs, link_deps):
    """Write Makefile code to specify the link flags and library dependencies.

    spec, configs: input from gyp.
    link_deps: link dependency list; see ComputeDeps()
    """
    # Libraries (i.e. -lfoo)
    # These must be included even for static libraries as some of them provide
    # implicit include paths through the build system.
    libraries = gyp.common.uniquer(spec.get('libraries', []))
    static_libs, dynamic_libs, ldflags_libs = self.FilterLibraries(libraries)

    if self.type != 'static_library':
      for configname, config in sorted(configs.iteritems()):
        ldflags = list(config.get('ldflags', []))
        self.WriteLn('')
        self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
      self.WriteList(ldflags_libs, 'LOCAL_GYP_LIBS')
      self.WriteLn('LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION)) '
                   '$(LOCAL_GYP_LIBS)')

    # Link dependencies (i.e. other gyp targets this target depends on)
    # These need not be included for static libraries as within the gyp build
    # we do not use the implicit include path mechanism.
    if self.type != 'static_library':
      static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
      shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
    else:
      static_link_deps = []
      shared_link_deps = []

    # Only write the lists if they are non-empty.
    if static_libs or static_link_deps:
      self.WriteLn('')
      self.WriteList(static_libs + static_link_deps,
                     'LOCAL_STATIC_LIBRARIES')
      self.WriteLn('# Enable grouping to fix circular references')
      self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
    if dynamic_libs or shared_link_deps:
      self.WriteLn('')
      self.WriteList(dynamic_libs + shared_link_deps,
                     'LOCAL_SHARED_LIBRARIES')

  def WriteTarget(self, spec, configs, deps, link_deps, part_of_all,
                  write_alias_target):
    """Write Makefile code to produce the final target of the gyp spec.

    spec, configs: input from gyp.
    deps, link_deps: dependency lists; see ComputeDeps()
    part_of_all: flag indicating this target is part of 'all'
    write_alias_target: flag indicating whether to create short aliases for
                        this target
    """
    self.WriteLn('### Rules for final target.')

    if self.type != 'none':
      self.WriteTargetFlags(spec, configs, link_deps)

    settings = spec.get('aosp_build_settings', {})
    if settings:
      self.WriteLn('### Set directly by aosp_build_settings.')
      for k, v in settings.iteritems():
        if isinstance(v, list):
          self.WriteList(v, k)
        else:
          self.WriteLn('%s := %s' % (k, make.QuoteIfNecessary(v)))
      self.WriteLn('')

    # Add to the set of targets which represent the gyp 'all' target. We use the
    # name 'gyp_all_modules' as the Android build system doesn't allow the use
    # of the Make target 'all' and because 'all_modules' is the equivalent of
    # the Make target 'all' on Android.
    if part_of_all and write_alias_target:
      self.WriteLn('# Add target alias to "gyp_all_modules" target.')
      self.WriteLn('.PHONY: gyp_all_modules')
      self.WriteLn('gyp_all_modules: %s' % self.android_module)
      self.WriteLn('')

    # Add an alias from the gyp target name to the Android module name. This
    # simplifies manual builds of the target, and is required by the test
    # framework.
    if self.target != self.android_module and write_alias_target:
      self.WriteLn('# Alias gyp target name.')
      self.WriteLn('.PHONY: %s' % self.target)
      self.WriteLn('%s: %s' % (self.target, self.android_module))
      self.WriteLn('')

    # Add the command to trigger build of the target type depending
    # on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
    # NOTE: This has to come last!
    modifier = ''
    if self.toolset == 'host':
      modifier = 'HOST_'
    if self.type == 'static_library':
      self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
    elif self.type == 'shared_library':
      self.WriteLn('LOCAL_PRELINK_MODULE := false')
      self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
    elif self.type == 'executable':
      # Executables are for build and test purposes only, so they're installed
      # to a directory that doesn't get included in the system image.
      self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
      self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
    else:
      # 'none' targets: emit a stamp file via base_rules.mk instead of a real
      # build product, so dependents still have something to depend on.
      self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
      self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
      if self.toolset == 'target':
        self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)')
      else:
        self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)')
      self.WriteLn()
      self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
      self.WriteLn()
      self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
      self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
      self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
      self.WriteLn('\t$(hide) touch $@')
      self.WriteLn()
      self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX :=')

  def WriteList(self, value_list, variable=None, prefix='',
                quoter=make.QuoteIfNecessary, local_pathify=False):
    """Write a variable definition that is a list of values.

    E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
         foo = blaha blahb
    but in a pretty-printed style.
    """
    values = ''
    if value_list:
      value_list = [quoter(prefix + l) for l in value_list]
      if local_pathify:
        value_list = [self.LocalPathify(l) for l in value_list]
      # One value per continuation line, tab-indented, for readable .mk output.
      values = ' \\\n\t' + ' \\\n\t'.join(value_list)
    self.fp.write('%s :=%s\n\n' % (variable, values))

  def WriteLn(self, text=''):
    """Write a single line (plus newline) to the output makefile."""
    self.fp.write(text + '\n')

  def LocalPathify(self, path):
    """Convert a subdirectory-relative path into a normalized path which starts
    with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
    Absolute paths, or paths that contain variables, are just normalized."""
    if '$(' in path or os.path.isabs(path):
      # path is not a file in the project tree in this case, but calling
      # normpath is still important for trimming trailing slashes.
      return os.path.normpath(path)
    local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
    local_path = os.path.normpath(local_path)
    # Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
    # - i.e. that the resulting path is still inside the project tree. The
    # path may legitimately have ended up containing just $(LOCAL_PATH), though,
    # so we don't look for a slash.
    assert local_path.startswith('$(LOCAL_PATH)'), (
           'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
    return local_path

  def ExpandInputRoot(self, template, expansion, dirname):
    """Expand %(INPUT_ROOT)s / %(INPUT_DIRNAME)s placeholders in a rule
    template; templates without placeholders are returned unchanged."""
    if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
      return template
    path = template % {
        'INPUT_ROOT': expansion,
        'INPUT_DIRNAME': dirname,
        }
    return os.path.normpath(path)


def PerformBuild(data, configurations, params):
  """Invoke the Android build for gyp_all_modules via ONE_SHOT_MAKEFILE.

  NOTE(review): 'data' and 'configurations' are unused here; the android
  backend only supports the default configuration.
  """
  # The android backend only supports the default configuration.
  options = params['options']
  makefile = os.path.abspath(os.path.join(options.toplevel_dir,
                                          'GypAndroid.mk'))
  env = dict(os.environ)
  env['ONE_SHOT_MAKEFILE'] = makefile
  arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'],
               'gyp_all_modules']
  print 'Building: %s' % arguments
  subprocess.check_call(arguments, env=env)


def GenerateOutput(target_list, target_dicts, data, params):
  """Write GypAndroid.mk plus one included .mk per target."""
  options = params['options']
  generator_flags = params.get('generator_flags', {})
  # NOTE(review): 'builddir_name' is never referenced again in this function —
  # looks like dead code; confirm before removing.
  builddir_name = generator_flags.get('output_dir', 'out')
  limit_to_target_all = generator_flags.get('limit_to_target_all', False)
  write_alias_targets = generator_flags.get('write_alias_targets', True)
  sdk_version = generator_flags.get('aosp_sdk_version', 19)
  android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
  assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'
def CalculateMakefilePath(build_file, base_name): """Determine where to write a Makefile for a given gyp file.""" # Paths in gyp files are relative to the .gyp file, but we want # paths relative to the source root for the master makefile. Grab # the path of the .gyp file as the base to relativize against. # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp". base_path = gyp.common.RelativePath(os.path.dirname(build_file), options.depth) # We write the file in the base_path directory. output_file = os.path.join(options.depth, base_path, base_name) assert not options.generator_output, ( 'The Android backend does not support options.generator_output.') base_path = gyp.common.RelativePath(os.path.dirname(build_file), options.toplevel_dir) return base_path, output_file # TODO: search for the first non-'Default' target. This can go # away when we add verification that all targets have the # necessary configurations. default_configuration = None toolsets = set([target_dicts[target]['toolset'] for target in target_list]) for target in target_list: spec = target_dicts[target] if spec['default_configuration'] != 'Default': default_configuration = spec['default_configuration'] break if not default_configuration: default_configuration = 'Default' srcdir = '.' makefile_name = 'GypAndroid' + options.suffix + '.mk' makefile_path = os.path.join(options.toplevel_dir, makefile_name) assert not options.generator_output, ( 'The Android backend does not support options.generator_output.') gyp.common.EnsureDirExists(makefile_path) root_makefile = open(makefile_path, 'w') root_makefile.write(header) # We set LOCAL_PATH just once, here, to the top of the project tree. This # allows all the other paths we use to be relative to the Android.mk file, # as the Android build system expects. root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n') # Find the list of targets that derive from the gyp file(s) being built. 
needed_targets = set() for build_file in params['build_files']: for target in gyp.common.AllTargets(target_list, target_dicts, build_file): needed_targets.add(target) build_files = set() include_list = set() android_modules = {} for qualified_target in target_list: build_file, target, toolset = gyp.common.ParseQualifiedTarget( qualified_target) relative_build_file = gyp.common.RelativePath(build_file, options.toplevel_dir) build_files.add(relative_build_file) included_files = data[build_file]['included_files'] for included_file in included_files: # The included_files entries are relative to the dir of the build file # that included them, so we have to undo that and then make them relative # to the root dir. relative_include_file = gyp.common.RelativePath( gyp.common.UnrelativePath(included_file, build_file), options.toplevel_dir) abs_include_file = os.path.abspath(relative_include_file) # If the include file is from the ~/.gyp dir, we should use absolute path # so that relocating the src dir doesn't break the path. if (params['home_dot_gyp'] and abs_include_file.startswith(params['home_dot_gyp'])): build_files.add(abs_include_file) else: build_files.add(relative_include_file) base_path, output_file = CalculateMakefilePath(build_file, target + '.' + toolset + options.suffix + '.mk') spec = target_dicts[qualified_target] configs = spec['configurations'] part_of_all = (qualified_target in needed_targets and not int(spec.get('suppress_wildcard', False))) if limit_to_target_all and not part_of_all: continue relative_target = gyp.common.QualifiedTarget(relative_build_file, target, toolset) writer = AndroidMkWriter(android_top_dir) android_module = writer.Write(qualified_target, relative_target, base_path, output_file, spec, configs, part_of_all=part_of_all, write_alias_target=write_alias_targets, sdk_version=sdk_version) if android_module in android_modules: print ('ERROR: Android module names must be unique. 
The following ' 'targets both generate Android module name %s.\n %s\n %s' % (android_module, android_modules[android_module], qualified_target)) return android_modules[android_module] = qualified_target # Our root_makefile lives at the source root. Compute the relative path # from there to the output_file for including. mkfile_rel_path = gyp.common.RelativePath(output_file, os.path.dirname(makefile_path)) include_list.add(mkfile_rel_path) root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration) root_makefile.write('GYP_VAR_PREFIX ?=\n') root_makefile.write('GYP_HOST_VAR_PREFIX ?=\n') root_makefile.write('GYP_HOST_MULTILIB ?=\n') # Write out the sorted list of includes. root_makefile.write('\n') for include_file in sorted(include_list): root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n') root_makefile.write('\n') if write_alias_targets: root_makefile.write(ALL_MODULES_FOOTER) root_makefile.close()
"""Test zha fan.""" import pytest import zigpy.profiles.zha as zha import zigpy.zcl.clusters.general as general import zigpy.zcl.clusters.hvac as hvac import zigpy.zcl.foundation as zcl_f from homeassistant.components import fan from homeassistant.components.fan import ( ATTR_SPEED, DOMAIN, SERVICE_SET_SPEED, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, SPEED_OFF, ) from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN from homeassistant.components.zha.core.discovery import GROUP_PROBE from homeassistant.components.zha.core.group import GroupMember from homeassistant.const import ( ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_OFF, STATE_ON, STATE_UNAVAILABLE, ) from homeassistant.setup import async_setup_component from .common import ( async_enable_traffic, async_find_group_entity_id, async_test_rejoin, find_entity_id, get_zha_gateway, send_attributes_report, ) from tests.async_mock import AsyncMock, call, patch IEEE_GROUPABLE_DEVICE = "01:2d:6f:00:0a:90:69:e8" IEEE_GROUPABLE_DEVICE2 = "02:2d:6f:00:0a:90:69:e8" @pytest.fixture def zigpy_device(zigpy_device_mock): """Device tracker zigpy device.""" endpoints = { 1: { "in_clusters": [hvac.Fan.cluster_id], "out_clusters": [], "device_type": zha.DeviceType.ON_OFF_SWITCH, } } return zigpy_device_mock( endpoints, node_descriptor=b"\x02@\x8c\x02\x10RR\x00\x00\x00R\x00\x00" ) @pytest.fixture async def coordinator(hass, zigpy_device_mock, zha_device_joined): """Test zha fan platform.""" zigpy_device = zigpy_device_mock( { 1: { "in_clusters": [general.Groups.cluster_id], "out_clusters": [], "device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT, } }, ieee="00:15:8d:00:02:32:4f:32", nwk=0x0000, node_descriptor=b"\xf8\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff", ) zha_device = await zha_device_joined(zigpy_device) zha_device.available = True return zha_device @pytest.fixture async def device_fan_1(hass, zigpy_device_mock, zha_device_joined): """Test zha fan platform.""" zigpy_device = zigpy_device_mock( { 1: { 
"in_clusters": [ general.Groups.cluster_id, general.OnOff.cluster_id, hvac.Fan.cluster_id, ], "out_clusters": [], "device_type": zha.DeviceType.ON_OFF_LIGHT, }, }, ieee=IEEE_GROUPABLE_DEVICE, ) zha_device = await zha_device_joined(zigpy_device) zha_device.available = True await hass.async_block_till_done() return zha_device @pytest.fixture async def device_fan_2(hass, zigpy_device_mock, zha_device_joined): """Test zha fan platform.""" zigpy_device = zigpy_device_mock( { 1: { "in_clusters": [ general.Groups.cluster_id, general.OnOff.cluster_id, hvac.Fan.cluster_id, general.LevelControl.cluster_id, ], "out_clusters": [], "device_type": zha.DeviceType.ON_OFF_LIGHT, }, }, ieee=IEEE_GROUPABLE_DEVICE2, ) zha_device = await zha_device_joined(zigpy_device) zha_device.available = True await hass.async_block_till_done() return zha_device async def test_fan(hass, zha_device_joined_restored, zigpy_device): """Test zha fan platform.""" zha_device = await zha_device_joined_restored(zigpy_device) cluster = zigpy_device.endpoints.get(1).fan entity_id = await find_entity_id(DOMAIN, zha_device, hass) assert entity_id is not None assert hass.states.get(entity_id).state == STATE_OFF await async_enable_traffic(hass, [zha_device], enabled=False) # test that the fan was created and that it is unavailable assert hass.states.get(entity_id).state == STATE_UNAVAILABLE # allow traffic to flow through the gateway and device await async_enable_traffic(hass, [zha_device]) # test that the state has changed from unavailable to off assert hass.states.get(entity_id).state == STATE_OFF # turn on at fan await send_attributes_report(hass, cluster, {1: 2, 0: 1, 2: 3}) assert hass.states.get(entity_id).state == STATE_ON # turn off at fan await send_attributes_report(hass, cluster, {1: 1, 0: 0, 2: 2}) assert hass.states.get(entity_id).state == STATE_OFF # turn on from HA cluster.write_attributes.reset_mock() await async_turn_on(hass, entity_id) assert len(cluster.write_attributes.mock_calls) == 1 assert 
cluster.write_attributes.call_args == call({"fan_mode": 2}) # turn off from HA cluster.write_attributes.reset_mock() await async_turn_off(hass, entity_id) assert len(cluster.write_attributes.mock_calls) == 1 assert cluster.write_attributes.call_args == call({"fan_mode": 0}) # change speed from HA cluster.write_attributes.reset_mock() await async_set_speed(hass, entity_id, speed=fan.SPEED_HIGH) assert len(cluster.write_attributes.mock_calls) == 1 assert cluster.write_attributes.call_args == call({"fan_mode": 3}) # test adding new fan to the network and HA await async_test_rejoin(hass, zigpy_device, [cluster], (1,)) async def async_turn_on(hass, entity_id, speed=None): """Turn fan on.""" data = { key: value for key, value in [(ATTR_ENTITY_ID, entity_id), (ATTR_SPEED, speed)] if value is not None } await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data, blocking=True) async def async_turn_off(hass, entity_id): """Turn fan off.""" data = {ATTR_ENTITY_ID: entity_id} if entity_id else {} await hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data, blocking=True) async def async_set_speed(hass, entity_id, speed=None): """Set speed for specified fan.""" data = { key: value for key, value in [(ATTR_ENTITY_ID, entity_id), (ATTR_SPEED, speed)] if value is not None } await hass.services.async_call(DOMAIN, SERVICE_SET_SPEED, data, blocking=True) @patch( "zigpy.zcl.clusters.hvac.Fan.write_attributes", new=AsyncMock(return_value=zcl_f.WriteAttributesResponse.deserialize(b"\x00")[0]), ) async def test_zha_group_fan_entity(hass, device_fan_1, device_fan_2, coordinator): """Test the fan entity for a ZHA group.""" zha_gateway = get_zha_gateway(hass) assert zha_gateway is not None zha_gateway.coordinator_zha_device = coordinator coordinator._zha_gateway = zha_gateway device_fan_1._zha_gateway = zha_gateway device_fan_2._zha_gateway = zha_gateway member_ieee_addresses = [device_fan_1.ieee, device_fan_2.ieee] members = [GroupMember(device_fan_1.ieee, 1), 
GroupMember(device_fan_2.ieee, 1)] # test creating a group with 2 members zha_group = await zha_gateway.async_create_zigpy_group("Test Group", members) await hass.async_block_till_done() assert zha_group is not None assert len(zha_group.members) == 2 for member in zha_group.members: assert member.device.ieee in member_ieee_addresses assert member.group == zha_group assert member.endpoint is not None entity_domains = GROUP_PROBE.determine_entity_domains(hass, zha_group) assert len(entity_domains) == 2 assert LIGHT_DOMAIN in entity_domains assert DOMAIN in entity_domains entity_id = async_find_group_entity_id(hass, DOMAIN, zha_group) assert hass.states.get(entity_id) is not None group_fan_cluster = zha_group.endpoint[hvac.Fan.cluster_id] dev1_fan_cluster = device_fan_1.device.endpoints[1].fan dev2_fan_cluster = device_fan_2.device.endpoints[1].fan await async_enable_traffic(hass, [device_fan_1, device_fan_2], enabled=False) await hass.async_block_till_done() # test that the fans were created and that they are unavailable assert hass.states.get(entity_id).state == STATE_UNAVAILABLE # allow traffic to flow through the gateway and device await async_enable_traffic(hass, [device_fan_1, device_fan_2]) # test that the fan group entity was created and is off assert hass.states.get(entity_id).state == STATE_OFF # turn on from HA group_fan_cluster.write_attributes.reset_mock() await async_turn_on(hass, entity_id) await hass.async_block_till_done() assert len(group_fan_cluster.write_attributes.mock_calls) == 1 assert group_fan_cluster.write_attributes.call_args[0][0] == {"fan_mode": 2} # turn off from HA group_fan_cluster.write_attributes.reset_mock() await async_turn_off(hass, entity_id) assert len(group_fan_cluster.write_attributes.mock_calls) == 1 assert group_fan_cluster.write_attributes.call_args[0][0] == {"fan_mode": 0} # change speed from HA group_fan_cluster.write_attributes.reset_mock() await async_set_speed(hass, entity_id, speed=fan.SPEED_HIGH) assert 
len(group_fan_cluster.write_attributes.mock_calls) == 1 assert group_fan_cluster.write_attributes.call_args[0][0] == {"fan_mode": 3} # test some of the group logic to make sure we key off states correctly await send_attributes_report(hass, dev1_fan_cluster, {0: 0}) await send_attributes_report(hass, dev2_fan_cluster, {0: 0}) # test that group fan is off assert hass.states.get(entity_id).state == STATE_OFF await send_attributes_report(hass, dev2_fan_cluster, {0: 2}) await hass.async_block_till_done() # test that group fan is speed medium assert hass.states.get(entity_id).state == STATE_ON await send_attributes_report(hass, dev2_fan_cluster, {0: 0}) await hass.async_block_till_done() # test that group fan is now off assert hass.states.get(entity_id).state == STATE_OFF @pytest.mark.parametrize( "plug_read, expected_state, expected_speed", ( (None, STATE_OFF, None), ({"fan_mode": 0}, STATE_OFF, SPEED_OFF), ({"fan_mode": 1}, STATE_ON, SPEED_LOW), ({"fan_mode": 2}, STATE_ON, SPEED_MEDIUM), ({"fan_mode": 3}, STATE_ON, SPEED_HIGH), ), ) async def test_fan_init( hass, zha_device_joined_restored, zigpy_device, plug_read, expected_state, expected_speed, ): """Test zha fan platform.""" cluster = zigpy_device.endpoints.get(1).fan cluster.PLUGGED_ATTR_READS = plug_read zha_device = await zha_device_joined_restored(zigpy_device) entity_id = await find_entity_id(DOMAIN, zha_device, hass) assert entity_id is not None assert hass.states.get(entity_id).state == expected_state assert hass.states.get(entity_id).attributes[ATTR_SPEED] == expected_speed async def test_fan_update_entity( hass, zha_device_joined_restored, zigpy_device, ): """Test zha fan platform.""" cluster = zigpy_device.endpoints.get(1).fan cluster.PLUGGED_ATTR_READS = {"fan_mode": 0} zha_device = await zha_device_joined_restored(zigpy_device) entity_id = await find_entity_id(DOMAIN, zha_device, hass) assert entity_id is not None assert hass.states.get(entity_id).state == STATE_OFF assert 
hass.states.get(entity_id).attributes[ATTR_SPEED] == SPEED_OFF assert cluster.read_attributes.await_count == 1 await async_setup_component(hass, "homeassistant", {}) await hass.async_block_till_done() await hass.services.async_call( "homeassistant", "update_entity", {"entity_id": entity_id}, blocking=True ) assert hass.states.get(entity_id).state == STATE_OFF assert hass.states.get(entity_id).attributes[ATTR_SPEED] == SPEED_OFF assert cluster.read_attributes.await_count == 2 cluster.PLUGGED_ATTR_READS = {"fan_mode": 1} await hass.services.async_call( "homeassistant", "update_entity", {"entity_id": entity_id}, blocking=True ) assert hass.states.get(entity_id).state == STATE_ON assert hass.states.get(entity_id).attributes[ATTR_SPEED] == SPEED_LOW assert cluster.read_attributes.await_count == 3
#!/usr/bin/env python3
# Copyright (c) 2017-2021 The Particl Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

"""Functional test for Particl anon (RingCT) transactions.

Exercises part/blind/anon conversions, filtertransactions, lockunspent
persistence across restart, wallet recovery from mnemonic, coincontrol,
hand-crafted anon transactions (commitment/rangeproof verification and
signing for an unowned input), subfee edge cases and key images.
"""

import random

from test_framework.test_particl import ParticlTestFramework
from test_framework.util import assert_raises_rpc_error
from test_framework.address import base58_to_byte
from test_framework.key import SECP256K1, ECPubKey
from test_framework.messages import COIN
from test_framework.messages import sha256


class AnonTest(ParticlTestFramework):
    def set_test_params(self):
        """Configure a clean 3-node chain; staking disabled via high reservebalance."""
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [ ['-debug','-noacceptnonstdtxn','-reservebalance=10000000'] for i in range(self.num_nodes)]

    def skip_test_if_missing_module(self):
        """Wallet support is required."""
        self.skip_if_no_wallet()

    def setup_network(self, split=False):
        """Start all nodes and connect node 0 to nodes 1 and 2."""
        self.add_nodes(self.num_nodes, extra_args=self.extra_args)
        self.start_nodes()
        self.connect_nodes_bi(0, 1)
        self.connect_nodes_bi(0, 2)
        self.sync_all()

    def run_test(self):
        """Run the anon transaction scenario end to end."""
        nodes = self.nodes

        # Node 0 holds the genesis coins and funds everything below.
        self.import_genesis_coins_a(nodes[0])

        txnHashes = []

        # Fixed mnemonic makes node 1's first stealth address deterministic,
        # checked by the assert just below.
        nodes[1].extkeyimportmaster('drip fog service village program equip minute dentist series hawk crop sphere olympic lazy garbage segment fox library good alley steak jazz force inmate')
        sxAddrTo1_1 = nodes[1].getnewstealthaddress('lblsx11')
        assert(sxAddrTo1_1 == 'TetbYTGv5LiqyFiUD3a5HHbpSinQ9KiRYDGAMvRzPfz4RnHMbKGAwDr1fjLGJ5Eqg1XDwpeGyqWMiwdK3qM3zKWjzHNpaatdoHVzzA')

        nodes[2].extkeyimportmaster(nodes[2].mnemonic('new')['master'])

        sxAddrTo0_1 = nodes[0].getnewstealthaddress('lblsx01')

        # Seed the anon output set with a mix of part->anon and blind->anon sends.
        txnHashes.append(nodes[0].sendtypeto('part', 'anon', [{'address': sxAddrTo1_1, 'amount': 1, 'narr': 'node0 -> node1 p->a'}, ]))
        txnHashes.append(nodes[0].sendtypeto('part', 'blind', [{'address': sxAddrTo0_1, 'amount': 1000, 'narr': 'node0 -> node0 p->b'}, ]))
        txnHashes.append(nodes[0].sendtypeto('blind', 'anon', [{'address': sxAddrTo1_1, 'amount': 100, 'narr': 'node0 -> node1 b->a 1'}, ]))
        txnHashes.append(nodes[0].sendtypeto('blind', 'anon', [{'address': sxAddrTo1_1, 'amount': 100, 'narr': 'node0 -> node1 b->a 2'}, ]))
        txnHashes.append(nodes[0].sendtypeto('blind', 'anon', [{'address': sxAddrTo1_1, 'amount': 100, 'narr': 'node0 -> node1 b->a 3'}, ]))
        txnHashes.append(nodes[0].sendtypeto('blind', 'anon', [{'address': sxAddrTo1_1, 'amount': 10, 'narr': 'node0 -> node1 b->a 4'}, ]))
        for k in range(5):
            txnHashes.append(nodes[0].sendtypeto('part', 'anon', [{'address': sxAddrTo1_1, 'amount': 10, 'narr': 'node0 -> node1 p->a'}, ]))
        for k in range(10):
            txnHashes.append(nodes[0].sendtypeto('blind', 'anon', [{'address': sxAddrTo1_1, 'amount': 10, 'narr': 'node0 -> node1 b->a'}, ]))

        for h in txnHashes:
            assert(self.wait_for_mempool(nodes[1], h))

        # Narrations must be visible to both sender and recipient wallets.
        assert('node0 -> node1 b->a 4' in self.dumpj(nodes[1].listtransactions('*', 100)))
        assert('node0 -> node1 b->a 4' in self.dumpj(nodes[0].listtransactions('*', 100)))

        self.stakeBlocks(2)
        block1_hash = nodes[1].getblockhash(1)
        ro = nodes[1].getblock(block1_hash)
        for txnHash in txnHashes:
            assert(txnHash in ro['tx'])

        # anon -> anon spend, confirmed in block 3
        txnHash = nodes[1].sendtypeto('anon', 'anon', [{'address': sxAddrTo0_1, 'amount': 1, 'narr': 'node1 -> node0 a->a'}, ])

        txnHashes = [txnHash,]
        assert(self.wait_for_mempool(nodes[0], txnHash))

        self.stakeBlocks(1)
        ro = nodes[1].getblock(nodes[1].getblockhash(3))
        for txnHash in txnHashes:
            assert(txnHash in ro['tx'])

        # Total number of anon outputs created so far is deterministic.
        assert(nodes[1].anonoutput()['lastindex'] == 28)

        txnHashes.clear()
        # Vary ring size / inputs-per-sig parameters.
        txnHashes.append(nodes[1].sendtypeto('anon', 'anon', [{'address': sxAddrTo0_1, 'amount': 101, 'narr': 'node1 -> node0 a->a'}, ], '', '', 5, 1))
        txnHashes.append(nodes[1].sendtypeto('anon', 'anon', [{'address': sxAddrTo0_1, 'amount': 0.1}, ], '', '', 5, 2))

        assert(nodes[1].getwalletinfo()['anon_balance'] > 10)

        outputs = [{'address': sxAddrTo0_1, 'amount': 10, 'subfee': True},]
        # Final True = fee estimate only; no transaction is submitted.
        ro = nodes[1].sendtypeto('anon', 'part', outputs, 'comment_to', 'comment_from', 4, 32, True)
        assert(ro['bytes'] > 0)

        txnHashes.append(nodes[1].sendtypeto('anon', 'part', outputs))
        txnHashes.append(nodes[1].sendtypeto('anon', 'anon', [{'address': sxAddrTo1_1, 'amount': 1},]))

        for txhash in txnHashes:
            assert(self.wait_for_mempool(nodes[0], txhash))

        self.log.info('Test filtertransactions with type filter')
        ro = nodes[1].filtertransactions({'type': 'anon', 'count': 20, 'show_anon_spends': True, 'show_change': True})
        assert(len(ro) > 2)
        foundTx = 0
        for t in ro:
            if t['txid'] == txnHashes[-1]:
                foundTx += 1
                # anon->anon back to self: net amount equals the fee
                assert(t['amount'] == t['fee'])
            elif t['txid'] == txnHashes[-2]:
                foundTx += 1
                assert('anon_inputs' in t)
                # 10 sent with subfee: recorded amount is -10 plus the fee
                assert(t['amount'] < -9.9 and t['amount'] > -10.0)
                n_standard = 0
                n_anon = 0
                for to in t['outputs']:
                    if to['type'] == 'standard':
                        n_standard += 1
                    elif to['type'] == 'anon':
                        n_anon += 1
                        assert(to['is_change'] == 'true')
                assert(n_standard == 1)
                assert(n_anon > 0)
                assert(t['type_in'] == 'anon')
            if t['txid'] == txnHashes[-3]:
                foundTx += 1
                assert(t['outputs'][0]['type'] == 'anon')
            if foundTx > 2:
                break
        assert(foundTx > 2)

        self.log.info('Test unspent with address filter')
        unspent_filtered = nodes[1].listunspentanon(1, 9999, [sxAddrTo1_1])
        assert(unspent_filtered[0]['label'] == 'lblsx11')

        self.log.info('Test permanent lockunspent')
        unspent = nodes[1].listunspentanon()
        # Third arg True = persistent lock, must survive a restart.
        assert(nodes[1].lockunspent(False, [unspent[0]], True) == True)
        assert(nodes[1].lockunspent(False, [unspent[1]], True) == True)
        assert(len(nodes[1].listlockunspent()) == 2)
        locked_balances = nodes[1].getlockedbalances()
        assert(locked_balances['trusted_anon'] > 0.0)
        assert(locked_balances['num_locked'] == 2)
        # Restart node
        self.sync_all()
        self.stop_node(1)
        self.start_node(1, self.extra_args[1] + ['-wallet=default_wallet',])
        self.connect_nodes_bi(0, 1)
        assert(len(nodes[1].listlockunspent()) == 2)
        assert(len(nodes[1].listunspentanon()) < len(unspent))
        assert(nodes[1].lockunspent(True, [unspent[0]]) == True)
        # Unlocking the same output twice must fail.
        assert_raises_rpc_error(-8, 'Invalid parameter, expected locked output', nodes[1].lockunspent, True, [unspent[0]])
        assert(len(nodes[1].listunspentanon()) == len(unspent)-1)
        assert(nodes[1].lockunspent(True) == True)
        assert(len(nodes[1].listunspentanon()) == len(unspent))
        assert(nodes[1].lockunspent(True) == True)

        ro = nodes[2].getblockstats(nodes[2].getblockchaininfo()['blocks'])
        assert(ro['height'] == 3)

        self.log.info('Test recover from mnemonic')
        # Txns currently in the mempool will be reprocessed in the next block
        self.stakeBlocks(1)

        wi_1 = nodes[1].getwalletinfo()
        nodes[1].createwallet('test_import')
        w1_2 = nodes[1].get_wallet_rpc('test_import')
        w1_2.extkeyimportmaster('drip fog service village program equip minute dentist series hawk crop sphere olympic lazy garbage segment fox library good alley steak jazz force inmate')
        w1_2.getnewstealthaddress('lblsx11')
        w1_2.rescanblockchain(0)
        wi_1_2 = w1_2.getwalletinfo()
        # Recovered wallet must see the same anon balance as the original.
        assert(wi_1_2['anon_balance'] == wi_1['anon_balance'])

        nodes[1].createwallet('test_import_locked')
        w1_3 = nodes[1].get_wallet_rpc('test_import_locked')
        w1_3.encryptwallet('test')
        # These filtertransactions options need key access and must refuse
        # to run while the wallet is locked.
        assert_raises_rpc_error(-13, 'Error: Wallet locked, please enter the wallet passphrase with walletpassphrase first.', w1_3.filtertransactions, {'show_blinding_factors': True})
        assert_raises_rpc_error(-13, 'Error: Wallet locked, please enter the wallet passphrase with walletpassphrase first.', w1_3.filtertransactions, {'show_anon_spends': True})
        w1_3.walletpassphrase('test', 30)
        # Skip initial rescan by passing -1 as scan_chain_from
        w1_3.extkeyimportmaster('drip fog service village program equip minute dentist series hawk crop sphere olympic lazy garbage segment fox library good alley steak jazz force inmate', '', False, 'imported key', 'imported acc', -1)
        w1_3.getnewstealthaddress('lblsx11')
        w1_3.walletsettings('other', {'onlyinstance': False})
        w1_3.walletlock()
        assert(w1_3.getwalletinfo()['encryptionstatus'] == 'Locked')
        # Rescan works on the locked wallet; balance checked after unlock.
        w1_3.rescanblockchain(0)
        w1_3.walletpassphrase('test', 30)
        wi_1_3 = w1_3.getwalletinfo()
        assert(wi_1_3['anon_balance'] == wi_1['anon_balance'])

        self.log.info('Test sendtypeto coincontrol')
        w1_inputs = w1_2.listunspentanon()
        assert(len(w1_inputs) > 1)
        use_input = w1_inputs[random.randint(0, len(w1_inputs) - 1)]
        coincontrol = {'inputs': [{'tx': use_input['txid'], 'n': use_input['vout']}]}
        txid = w1_2.sendtypeto('anon', 'anon', [{'address': sxAddrTo0_1, 'amount': 0.01}, ], '', '', 7, 1, False, coincontrol)

        # The selected input must no longer appear as unspent.
        w1_inputs_after = w1_2.listunspentanon()
        for txin in w1_inputs_after:
            if txin['txid'] == use_input['txid'] and txin['vout'] == use_input['vout']:
                raise ValueError('Output should be spent')

        assert(self.wait_for_mempool(nodes[1], txid))
        raw_tx = w1_2.getrawtransaction(txid, True)
        # The chosen input must be one of the ring members of the spend.
        possible_inputs = raw_tx['vin'][0]['ring_row_0'].split(', ')
        possible_inputs_txids = []
        for pi in possible_inputs:
            anonoutput = w1_2.anonoutput(pi)
            possible_inputs_txids.append(anonoutput['txnhash'] + '.' + str(anonoutput['n']))
        assert(use_input['txid'] + '.' + str(use_input['vout']) in possible_inputs_txids)

        # Stake until node 0's anon balance has fully matured.
        num_tries = 20
        for i in range(num_tries):
            if nodes[0].getbalances()['mine']['anon_immature'] == 0.0:
                break
            self.stakeBlocks(1)
            if i >= num_tries - 1:
                raise ValueError('anon balance immature')
        assert(nodes[0].getbalances()['mine']['anon_trusted'] > 100.0)

        self.log.info('Test crafting anon transactions.')
        sxAddr2_1 = nodes[2].getnewstealthaddress('lblsx01')
        ephem = nodes[0].derivefromstealthaddress(sxAddr2_1)
        # Random 32-byte blinding factor, supplied explicitly so the
        # commitment can be verified below.
        blind = bytes(random.getrandbits(8) for i in range(32)).hex()
        outputs = [{
            'address': sxAddr2_1,
            'type': 'anon',
            'amount': 10.0,
            'blindingfactor': blind,
            'ephemeral_key': ephem['ephemeral_privatekey'],
        },]
        tx = nodes[0].createrawparttransaction([], outputs)
        options = {'sign_tx': True}
        tx_signed = nodes[0].fundrawtransactionfrom('anon', tx['hex'], {}, tx['amounts'], options)
        txid = nodes[0].sendrawtransaction(tx_signed['hex'])
        self.stakeBlocks(1)

        sx_privkey = nodes[2].dumpprivkey(sxAddr2_1)
        assert('scan_secret' in sx_privkey)
        assert('spend_secret' in sx_privkey)
        sx_pubkey = nodes[2].getaddressinfo(sxAddr2_1)
        assert('scan_public_key' in sx_pubkey)
        assert('spend_public_key' in sx_pubkey)

        # Locate the 10.0 output by verifying the commitment and rewinding
        # the rangeproof with the derived stealth key.
        stealth_key = nodes[2].derivefromstealthaddress(sxAddr2_1, ephem['ephemeral_pubkey'])
        prevtx = nodes[2].decoderawtransaction(tx_signed['hex'])
        found_output = -1
        for vout in prevtx['vout']:
            if vout['type'] != 'anon':
                continue
            try:
                ro = nodes[2].verifycommitment(vout['valueCommitment'], blind, 10.0)
                assert(ro['result'] is True)
                ro = nodes[2].rewindrangeproof(vout['rangeproof'], vout['valueCommitment'], stealth_key['privatekey'], ephem['ephemeral_pubkey'])
                assert(ro['amount'] == 10.0)
                found_output = vout['n']
            except Exception as e:
                # Other outputs (e.g. change) won't match this commitment.
                if not str(e).startswith('Mismatched commitment'):
                    print(e)
        assert(found_output > -1)

        key_bytes = base58_to_byte(stealth_key['privatekey'])[0][0:32]
        epk = ECPubKey()
        epk.set(bytes.fromhex(ephem['ephemeral_pubkey']))

        self.log.info('Test rewindrangeproof with final nonce')
        # ECDH
        # Compute the shared secret manually: privkey * ephemeral pubkey,
        # then double-sha256 of the compressed point.
        P = SECP256K1.affine(epk.p)
        M = SECP256K1.affine(SECP256K1.mul([((P[0], P[1], P[2]), int.from_bytes(key_bytes, 'big'))]))
        eM = bytes([0x02 + (M[1] & 1)]) + M[0].to_bytes(32, 'big')
        hM = sha256(eM)
        hhM = sha256(hM)
        # Reverse, SetHex is LE
        hhM = hhM[::-1]
        vout = prevtx['vout'][found_output]
        ro = nodes[2].rewindrangeproof(vout['rangeproof'], vout['valueCommitment'], hhM.hex())
        assert(ro['amount'] == 10.0)

        self.log.info('Test signing for unowned anon input')
        # Input not in wallet, must be in chain for pubkey index
        prev_tx_signed = nodes[0].decoderawtransaction(tx_signed['hex'])
        prev_commitment = prev_tx_signed['vout'][found_output]['valueCommitment']
        prev_public_key = prev_tx_signed['vout'][found_output]['pubkey']
        assert(prev_public_key == stealth_key['pubkey'])

        outputs = [{
            'address': sxAddr2_1,
            'type': 'anon',
            'amount': 10.0,
        },]
        tx = nodes[0].createrawparttransaction([], outputs)
        options = {
            'subtractFeeFromOutputs': [0,],
            'inputs': [{
                'tx': txid,
                'n': found_output,
                'type': 'anon',
                'value': 10.0,
                'commitment': prev_commitment,
                'pubkey': prev_public_key,
                'privkey': stealth_key['privatekey'],
                'blind': blind,
            }],
            'feeRate': 0.001,
            'sign_tx': True,
        }
        input_amounts = { }
        used_input = (txid, found_output)
        tx_signed = nodes[0].fundrawtransactionfrom('anon', tx['hex'], input_amounts, tx['amounts'], options)

        # Submission can fail until enough blocks exist for the ring; retry.
        num_tries = 20
        for i in range(num_tries):
            try:
                spending_txid = nodes[0].sendrawtransaction(tx_signed['hex'])
                break
            except Exception:
                self.stakeBlocks(1)
            if i >= num_tries - 1:
                raise ValueError('Can\'t submit txn')

        assert(self.wait_for_mempool(nodes[2], spending_txid))
        self.stakeBlocks(1)
        w2b = nodes[2].getbalances()
        # 10.0 minus fee, still immature after one confirmation.
        assert(w2b['mine']['anon_immature'] < 10 and w2b['mine']['anon_immature'] > 9)

        self.log.info('Test subfee edge case')
        # Output one satoshi below total inputs; fee must be subtracted.
        unspents = nodes[0].listunspent()
        total_input = int(unspents[0]['amount'] * COIN) + int(unspents[1]['amount'] * COIN)
        total_output = total_input - 1
        coincontrol = {'test_mempool_accept': True, 'show_hex': True, 'show_fee': True, 'inputs': [{'tx': unspents[0]['txid'],'n': unspents[0]['vout']}, {'tx': unspents[1]['txid'],'n': unspents[1]['vout']}]}
        outputs = [{'address': sxAddrTo0_1, 'amount': '%i.%08i' % (total_output // COIN, total_output % COIN), 'narr': '', 'subfee' : True},]
        tx = nodes[0].sendtypeto('part', 'anon', outputs, 'comment', 'comment-to', 5, 1, False, coincontrol)
        assert(total_input == int(tx['fee'] * COIN) + int(tx['outputs_fee'][sxAddrTo0_1]))
        assert(tx['mempool-allowed'] == True)

        self.log.info('Test checkkeyimage')
        unspents = nodes[0].listunspentanon(0, 999999, [], True, {'show_pubkeys': True})
        anon_pubkey = unspents[0]['pubkey']
        keyimage = nodes[0].getkeyimage(anon_pubkey)['keyimage']
        spent = nodes[0].checkkeyimage(keyimage)
        assert(spent['spent'] is False)

        # The input consumed by the unowned-input spend above must show as
        # spent, with the spending txid reported.
        raw_tx = nodes[0].decoderawtransaction(nodes[0].gettransaction(used_input[0])['hex'])
        used_pubkey = raw_tx['vout'][used_input[1]]['pubkey']
        used_keyimage = nodes[2].getkeyimage(used_pubkey)['keyimage']
        spent = nodes[0].checkkeyimage(used_keyimage)
        assert(spent['spent'] is True)
        assert(spent['txid'] == spending_txid)

        self.log.info('Test rollbackrctindex')
        nodes[0].rollbackrctindex()


if __name__ == '__main__':
    AnonTest().main()
"""Testing for K-means""" import numpy as np import warnings from scipy import sparse as sp from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import SkipTest from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.fixes import unique from sklearn.metrics.cluster import v_measure_score from sklearn.cluster import KMeans, k_means from sklearn.cluster import MiniBatchKMeans from sklearn.cluster.k_means_ import _labels_inertia from sklearn.cluster.k_means_ import _mini_batch_step from sklearn.cluster._k_means import csr_row_norm_l2 from sklearn.datasets.samples_generator import make_blobs # non centered, sparse centers to check the centers = np.array([ [0.0, 5.0, 0.0, 0.0, 0.0], [1.0, 1.0, 4.0, 0.0, 0.0], [1.0, 0.0, 0.0, 5.0, 1.0], ]) n_samples = 100 n_clusters, n_features = centers.shape X, true_labels = make_blobs(n_samples=n_samples, centers=centers, cluster_std=1., random_state=42) X_csr = sp.csr_matrix(X) def test_square_norms(): x_squared_norms = (X ** 2).sum(axis=1) x_squared_norms_from_csr = csr_row_norm_l2(X_csr) assert_array_almost_equal(x_squared_norms, x_squared_norms_from_csr, 5) def test_kmeans_dtype(): rnd = np.random.RandomState(0) X = rnd.normal(size=(40, 2)) X = (X * 10).astype(np.uint8) km = KMeans(n_init=1).fit(X) with warnings.catch_warnings(record=True) as w: assert_array_equal(km.labels_, km.predict(X)) assert_equal(len(w), 1) def test_labels_assignement_and_inertia(): # pure numpy implementation as easily auditable reference gold # implementation rng = np.random.RandomState(42) noisy_centers = centers + rng.normal(size=centers.shape) labels_gold = - np.ones(n_samples, dtype=np.int) mindist = 
np.empty(n_samples) mindist.fill(np.infty) for center_id in range(n_clusters): dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1) labels_gold[dist < mindist] = center_id mindist = np.minimum(dist, mindist) inertia_gold = mindist.sum() assert_true((mindist >= 0.0).all()) assert_true((labels_gold != -1).all()) # perform label assignement using the dense array input x_squared_norms = (X ** 2).sum(axis=1) labels_array, inertia_array = _labels_inertia( X, x_squared_norms, noisy_centers) assert_array_almost_equal(inertia_array, inertia_gold) assert_array_equal(labels_array, labels_gold) # perform label assignement using the sparse CSR input x_squared_norms_from_csr = csr_row_norm_l2(X_csr) labels_csr, inertia_csr = _labels_inertia( X_csr, x_squared_norms_from_csr, noisy_centers) assert_array_almost_equal(inertia_csr, inertia_gold) assert_array_equal(labels_csr, labels_gold) def test_minibatch_update_consistency(): """Check that dense and sparse minibatch update give the same results""" rng = np.random.RandomState(42) old_centers = centers + rng.normal(size=centers.shape) new_centers = old_centers.copy() new_centers_csr = old_centers.copy() counts = np.zeros(new_centers.shape[0], dtype=np.int32) counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32) x_squared_norms = (X ** 2).sum(axis=1) x_squared_norms_csr = csr_row_norm_l2(X_csr, squared=True) buffer = np.zeros(centers.shape[1], dtype=np.double) buffer_csr = np.zeros(centers.shape[1], dtype=np.double) # extract a small minibatch X_mb = X[:10] X_mb_csr = X_csr[:10] x_mb_squared_norms = x_squared_norms[:10] x_mb_squared_norms_csr = x_squared_norms_csr[:10] # step 1: compute the dense minibatch update old_inertia, incremental_diff = _mini_batch_step( X_mb, x_mb_squared_norms, new_centers, counts, buffer, 1) assert_greater(old_inertia, 0.0) # compute the new inertia on the same batch to check that it decreased labels, new_inertia = _labels_inertia( X_mb, x_mb_squared_norms, new_centers) 
assert_greater(new_inertia, 0.0) assert_less(new_inertia, old_inertia) # check that the incremental difference computation is matching the # final observed value effective_diff = np.sum((new_centers - old_centers) ** 2) assert_almost_equal(incremental_diff, effective_diff) # step 2: compute the sparse minibatch update old_inertia_csr, incremental_diff_csr = _mini_batch_step( X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr, buffer_csr, 1) assert_greater(old_inertia_csr, 0.0) # compute the new inertia on the same batch to check that it decreased labels_csr, new_inertia_csr = _labels_inertia( X_mb_csr, x_mb_squared_norms_csr, new_centers_csr) assert_greater(new_inertia_csr, 0.0) assert_less(new_inertia_csr, old_inertia_csr) # check that the incremental difference computation is matching the # final observed value effective_diff = np.sum((new_centers_csr - old_centers) ** 2) assert_almost_equal(incremental_diff_csr, effective_diff) # step 3: check that sparse and dense updates lead to the same results assert_array_equal(labels, labels_csr) assert_array_almost_equal(new_centers, new_centers_csr) assert_almost_equal(incremental_diff, incremental_diff_csr) assert_almost_equal(old_inertia, old_inertia_csr) assert_almost_equal(new_inertia, new_inertia_csr) def _check_fitted_model(km): # check that the number of clusters centers and distinct labels match # the expectation centers = km.cluster_centers_ assert_equal(centers.shape, (n_clusters, n_features)) labels = km.labels_ assert_equal(np.unique(labels).shape[0], n_clusters) # check that the labels assignements are perfect (up to a permutation) assert_equal(v_measure_score(true_labels, labels), 1.0) assert_greater(km.inertia_, 0.0) # check error on dataset being too small assert_raises(ValueError, km.fit, [[0., 1.]]) def test_k_means_plus_plus_init(): km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42).fit(X) _check_fitted_model(km) def test_k_means_check_fitted(): km = 
KMeans(n_clusters=n_clusters, random_state=42) assert_raises(AttributeError, km._check_fitted) def test_k_means_new_centers(): # Explore the part of the code where a new center is reassigned X = np.array([[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 0]]) labels = [0, 1, 2, 1, 1, 2] bad_centers = np.array([[+0, 1, 0, 0], [.2, 0, .2, .2], [+0, 0, 0, 0]]) km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10, random_state=1) for this_X in (X, sp.coo_matrix(X)): km.fit(this_X) this_labels = km.labels_ # Reorder the labels so that the first instance is in cluster 0, # the second in cluster 1, ... this_labels = unique(this_labels, return_index=True)[1][this_labels] np.testing.assert_array_equal(this_labels, labels) def _get_mac_os_version(): import platform mac_version, _, _ = platform.mac_ver() if mac_version: # turn something like '10.7.3' into '10.7' return '.'.join(mac_version.split('.')[:2]) def test_k_means_plus_plus_init_2_jobs(): if _get_mac_os_version() >= '10.7': raise SkipTest('Multi-process bug in Mac OS X Lion (see issue #636)') km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2, random_state=42).fit(X) _check_fitted_model(km) def test_k_means_plus_plus_init_sparse(): km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42) km.fit(X_csr) _check_fitted_model(km) def test_k_means_random_init(): km = KMeans(init="random", n_clusters=n_clusters, random_state=42) km.fit(X) _check_fitted_model(km) def test_k_means_random_init_sparse(): km = KMeans(init="random", n_clusters=n_clusters, random_state=42) km.fit(X_csr) _check_fitted_model(km) def test_k_means_plus_plus_init_not_precomputed(): km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42, precompute_distances=False).fit(X) _check_fitted_model(km) def test_k_means_random_init_not_precomputed(): km = KMeans(init="random", n_clusters=n_clusters, random_state=42, precompute_distances=False).fit(X) _check_fitted_model(km) def 
test_k_means_perfect_init(): km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42, n_init=1) km.fit(X) _check_fitted_model(km) def test_mb_k_means_plus_plus_init_dense_array(): mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters, random_state=42) mb_k_means.fit(X) _check_fitted_model(mb_k_means) def test_mb_kmeans_verbose(): mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters, random_state=42, verbose=1) from cStringIO import StringIO import sys old_stdout = sys.stdout sys.stdout = StringIO() mb_k_means.fit(X) sys.stdout = old_stdout def test_mb_k_means_plus_plus_init_sparse_matrix(): mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters, random_state=42) mb_k_means.fit(X_csr) _check_fitted_model(mb_k_means) def test_minibatch_init_with_large_k(): mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20) # Check that a warning is raised, as the number clusters is larger # than the init_size with warnings.catch_warnings(record=True) as warn_queue: mb_k_means.fit(X) assert_equal(len(warn_queue), 1) def test_minibatch_k_means_random_init_dense_array(): # increase n_init to make random init stable enough mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters, random_state=42, n_init=10).fit(X) _check_fitted_model(mb_k_means) def test_minibatch_k_means_random_init_sparse_csr(): # increase n_init to make random init stable enough mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters, random_state=42, n_init=10).fit(X_csr) _check_fitted_model(mb_k_means) def test_minibatch_k_means_perfect_init_dense_array(): mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42).fit(X) _check_fitted_model(mb_k_means) def test_minibatch_k_means_perfect_init_sparse_csr(): mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42).fit(X_csr) _check_fitted_model(mb_k_means) def test_sparse_mb_k_means_callable_init(): def 
test_init(X, k, random_state): return centers mb_k_means = MiniBatchKMeans(init=test_init, random_state=42).fit(X_csr) _check_fitted_model(mb_k_means) def test_mini_batch_k_means_random_init_partial_fit(): km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42) # use the partial_fit API for online learning for X_minibatch in np.array_split(X, 10): km.partial_fit(X_minibatch) # compute the labeling on the complete dataset labels = km.predict(X) assert_equal(v_measure_score(true_labels, labels), 1.0) def test_minibatch_default_init_size(): mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters, batch_size=10, random_state=42).fit(X) assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size) _check_fitted_model(mb_k_means) def test_minibatch_tol(): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10, random_state=42, tol=.01).fit(X) _check_fitted_model(mb_k_means) def test_minibatch_set_init_size(): mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters, init_size=666, random_state=42).fit(X) assert_equal(mb_k_means.init_size, 666) assert_equal(mb_k_means.init_size_, n_samples) _check_fitted_model(mb_k_means) def test_k_means_invalid_init(): km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters) assert_raises(ValueError, km.fit, X) def test_mini_match_k_means_invalid_init(): km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters) assert_raises(ValueError, km.fit, X) def test_k_means_copyx(): """Check if copy_x=False returns nearly equal X after de-centering.""" my_X = X.copy() km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42) km.fit(my_X) _check_fitted_model(km) # check if my_X is centered assert_array_almost_equal(my_X, X) def test_k_means_non_collapsed(): """Check k_means with a bad initialization does not yield a singleton Starting with bad centers that are quickly ignored should not result in a repositioning of the centers to the center of mass that would 
lead to collapsed centers which in turns make the clustering dependent of the numerical unstabilities. """ my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]]) array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]]) km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1) km.fit(my_X) # centers must not been collapsed assert_equal(len(np.unique(km.labels_)), 3) centers = km.cluster_centers_ assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1) assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1) assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1) def test_predict(): km = KMeans(n_clusters=n_clusters, random_state=42) km.fit(X) # sanity check: predict centroid labels pred = km.predict(km.cluster_centers_) assert_array_equal(pred, np.arange(n_clusters)) # sanity check: re-predict labeling for training set samples pred = km.predict(X) assert_array_equal(pred, km.labels_) # re-predict labels for training set using fit_predict pred = km.fit_predict(X) assert_array_equal(pred, km.labels_) def test_score(): km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42) s1 = km1.fit(X).score(X) km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42) s2 = km2.fit(X).score(X) assert_greater(s2, s1) def test_predict_minibatch_dense_input(): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X) # sanity check: predict centroid labels pred = mb_k_means.predict(mb_k_means.cluster_centers_) assert_array_equal(pred, np.arange(n_clusters)) # sanity check: re-predict labeling for training set samples pred = mb_k_means.predict(X) assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_) def test_predict_minibatch_kmeanspp_init_sparse_input(): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++', n_init=10).fit(X_csr) # sanity check: re-predict labeling for training set samples assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_) # sanity check: predict centroid 
labels pred = mb_k_means.predict(mb_k_means.cluster_centers_) assert_array_equal(pred, np.arange(n_clusters)) # check that models trained on sparse input also works for dense input at # predict time assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_) def test_predict_minibatch_random_init_sparse_input(): mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=10).fit(X_csr) # sanity check: re-predict labeling for training set samples assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_) # sanity check: predict centroid labels pred = mb_k_means.predict(mb_k_means.cluster_centers_) assert_array_equal(pred, np.arange(n_clusters)) # check that models trained on sparse input also works for dense input at # predict time assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_) def test_input_dtypes(): X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]] X_int = np.array(X_list, dtype=np.int32) X_int_csr = sp.csr_matrix(X_int) init_int = X_int[:2] fitted_models = [ KMeans(n_clusters=2).fit(X_list), KMeans(n_clusters=2).fit(X_int), KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list), KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int), # mini batch kmeans is very unstable on such a small dataset hence # we use many inits MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list), MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int), MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr), MiniBatchKMeans(n_clusters=2, batch_size=2, init=init_int).fit(X_list), MiniBatchKMeans(n_clusters=2, batch_size=2, init=init_int).fit(X_int), MiniBatchKMeans(n_clusters=2, batch_size=2, init=init_int).fit(X_int_csr), ] expected_labels = [0, 1, 1, 0, 0, 1] scores = np.array([v_measure_score(expected_labels, km.labels_) for km in fitted_models]) assert_array_equal(scores, np.ones(scores.shape[0])) def test_transform(): km = KMeans(n_clusters=n_clusters) km.fit(X) X_new = 
km.transform(km.cluster_centers_) for c in range(n_clusters): assert_equal(X_new[c, c], 0) for c2 in range(n_clusters): if c != c2: assert_greater(X_new[c, c2], 0) def test_fit_transform(): X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X) X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X) assert_array_equal(X1, X2) def test_n_init(): """Check that increasing the number of init increases the quality""" n_runs = 5 n_init_range = [1, 5, 10] inertia = np.zeros((len(n_init_range), n_runs)) for i, n_init in enumerate(n_init_range): for j in range(n_runs): km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, random_state=j).fit(X) inertia[i, j] = km.inertia_ inertia = inertia.mean(axis=1) failure_msg = ("Inertia %r should be decreasing" " when n_init is increasing.") % list(inertia) for i in range(len(n_init_range) - 1): assert_true(inertia[i] >= inertia[i + 1], failure_msg) def test_k_means_function(): # test calling the k_means function directly # catch output from cStringIO import StringIO import sys old_stdout = sys.stdout sys.stdout = StringIO() cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters, verbose=True) sys.stdout = old_stdout centers = cluster_centers assert_equal(centers.shape, (n_clusters, n_features)) labels = labels assert_equal(np.unique(labels).shape[0], n_clusters) # check that the labels assignements are perfect (up to a permutation) assert_equal(v_measure_score(true_labels, labels), 1.0) assert_greater(inertia, 0.0) # check warning when centers are passed with warnings.catch_warnings(record=True) as w: k_means(X, n_clusters=n_clusters, init=centers) assert_equal(len(w), 1) # to many clusters desired assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for tfq.python.layers.high_level.noisy_controlled_pqc layer."""
import numbers
import numpy as np
import tensorflow as tf

import cirq
import sympy
from tensorflow_quantum.core.ops.noise import noisy_expectation_op
from tensorflow_quantum.core.ops.noise import noisy_sampled_expectation_op
from tensorflow_quantum.python.differentiators import parameter_shift
from tensorflow_quantum.python.layers.circuit_construction import elementary
from tensorflow_quantum.python import util


class NoisyControlledPQC(tf.keras.layers.Layer):
    """Noisy Controlled Parametrized Quantum Circuit (PQC) Layer.

    The `NoisyControlledPQC` layer is the noisy variant of the
    `ControlledPQC` layer. This layer uses monte carlo trajectory simulation
    to support noisy simulation functionality for the `ControlledPQC` layer.
    Here is a simple example you can use to get started:

    >>> bit = cirq.GridQubit(0, 0)
    >>> model = cirq.Circuit(
    ...     cirq.X(bit) ** sympy.Symbol('alpha'),
    ...     cirq.Z(bit) ** sympy.Symbol('beta'),
    ...     cirq.depolarize(0.01)(bit)
    ... )
    >>> outputs = tfq.layers.NoisyControlledPQC(
    ...     model,
    ...     cirq.Z(bit),
    ...     repetitions=1000,
    ...     sample_based=False
    ... )
    >>> quantum_data = tfq.convert_to_tensor([
    ...     cirq.Circuit(),
    ...     cirq.Circuit(cirq.X(bit))
    ... ])
    >>> model_params = tf.convert_to_tensor([[0.5, 0.5], [0.25, 0.75]])
    >>> res = outputs([quantum_data, model_params])
    >>> res
    tf.Tensor(
    [[-1.4901161e-08]
     [-7.0710683e-01]], shape=(2, 1), dtype=float32)


    The above example estimates the noisy expectation values using 1000
    monte-carlo trajectory simulations with analytical calculations done on
    each trajectory. Just like with the `PQC` it is *very important* that
    the quantum datapoint circuits do not contain any `sympy.Symbols`
    themselves (This can be supported with advanced usage of the
    `tfq.layers.Expectation` layer with backend='noisy').

    Just like `ControlledPQC` it is possible to specify multiple readout
    operations and switch to sample based expectation calculation based on
    measured bitstrings instead of analytic calculation:

    >>> bit = cirq.GridQubit(0, 0)
    >>> model = cirq.Circuit(
    ...     cirq.X(bit) ** sympy.Symbol('alpha'),
    ...     cirq.Z(bit) ** sympy.Symbol('beta'),
    ...     cirq.depolarize(0.01)(bit)
    ... )
    >>> outputs = tfq.layers.NoisyControlledPQC(
    ...     model,
    ...     [cirq.Z(bit), cirq.X(bit), cirq.Y(bit)],
    ...     repetitions=1000,
    ...     sample_based=True
    ... )
    >>> quantum_data = tfq.convert_to_tensor([
    ...     cirq.Circuit(),
    ...     cirq.Circuit(cirq.X(bit))
    ... ])
    >>> model_params = tf.convert_to_tensor([[0.5, 0.5], [0.25, 0.75]])
    >>> res = outputs([quantum_data, model_params])
    >>> res
    tf.Tensor(
    [[-0.0028  1.     -0.0028]
     [-0.6956 -0.498  -0.498 ]], shape=(2, 3), dtype=float32)


    Unlike `ControlledPQC` a value for `backend` can not be supplied in the
    layer constructor. If you want to use a custom backend please use
    `tfq.layers.PQC` instead. A value for `differentiator` can also be
    supplied in the constructor to indicate the differentiation scheme this
    `NoisyControlledPQC` layer should use. Here's how you would take the
    gradients of the above example:

    >>> bit = cirq.GridQubit(0, 0)
    >>> model = cirq.Circuit(
    ...     cirq.X(bit) ** sympy.Symbol('alpha'),
    ...     cirq.Z(bit) ** sympy.Symbol('beta'),
    ...     cirq.depolarize(0.01)(bit)
    ... )
    >>> outputs = tfq.layers.NoisyControlledPQC(
    ...     model,
    ...     [cirq.Z(bit), cirq.X(bit), cirq.Y(bit)],
    ...     repetitions=5000,
    ...     sample_based=True,
    ...     differentiator=tfq.differentiators.ParameterShift())
    >>> quantum_data = tfq.convert_to_tensor([
    ...     cirq.Circuit(),
    ...     cirq.Circuit(cirq.X(bit))
    ... ])
    >>> model_params = tf.convert_to_tensor([[0.5, 0.5], [0.25, 0.75]])
    >>> with tf.GradientTape() as g:
    ...     g.watch(model_params)
    ...     res = outputs([quantum_data, model_params])
    >>> grads = g.gradient(res, model_params)
    >>> grads
    tf.Tensor(
    [[-3.1415927   3.1415927 ]
     [-0.9211149   0.02764606]], shape=(2, 2), dtype=float32)


    Lastly, like all layers in TensorFlow the `NoisyControlledPQC` layer can
    be called on any `tf.Tensor` as long as it is the right shape. This
    means you could replace `model_params` in the above example with the
    outputs from a `tf.keras.Dense` layer or replace `quantum_data` with
    values fed in from a `tf.keras.Input`.
    """

    def __init__(self,
                 model_circuit,
                 operators,
                 *,
                 repetitions=None,
                 sample_based=None,
                 differentiator=None,
                 **kwargs):
        """Instantiate this layer.

        Create a layer that will output noisy expectation values of the
        given operators when fed quantum data to it's input layer. This layer
        will take two input tensors, one representing a quantum data source
        (these circuits must not contain any symbols) and the other
        representing control parameters for the model circuit that gets
        appended to the datapoints.

        Args:
            model_circuit: `cirq.Circuit` containing `sympy.Symbols` that
                will be used as the model which will be fed quantum data
                inputs.
            operators: `cirq.PauliSum` or Python `list` of `cirq.PauliSum`
                objects used as observables at the end of the model circuit.
            repetitions: Python `int` indicating how many trajectories to use
                when estimating expectation values.
            sample_based: Python `bool` indicating whether to use sampling to
                estimate expectations or analytic calculations with each
                trajectory.
            differentiator: Optional `tfq.differentiator` object to specify
                how gradients of `model_circuit` should be calculated.
        """
        super().__init__(**kwargs)

        # Ingest model_circuit.
        if not isinstance(model_circuit, cirq.Circuit):
            # NOTE: the original message lacked a `{}` placeholder, so the
            # offending value never appeared in the error text.
            raise TypeError("model_circuit must be a cirq.Circuit object."
                            " Given: {}".format(model_circuit))

        self._symbols_list = list(
            sorted(util.get_circuit_symbols(model_circuit)))
        self._symbols = tf.constant([str(x) for x in self._symbols_list])
        self._circuit = util.convert_to_tensor([model_circuit])

        if len(self._symbols_list) == 0:
            raise ValueError("model_circuit has no sympy.Symbols. Please "
                             "provide a circuit that contains symbols so "
                             "that their values can be trained.")

        # Ingest operators.
        if isinstance(operators, (cirq.PauliString, cirq.PauliSum)):
            operators = [operators]

        if not isinstance(operators, (list, np.ndarray, tuple)):
            raise TypeError("operators must be a cirq.PauliSum or "
                            "cirq.PauliString, or a list, tuple, "
                            "or np.array containing them. "
                            "Got {}.".format(type(operators)))
        if not all([
                isinstance(op, (cirq.PauliString, cirq.PauliSum))
                for op in operators
        ]):
            raise TypeError("Each element in operators to measure "
                            "must be a cirq.PauliString"
                            " or cirq.PauliSum")

        self._operators = util.convert_to_tensor([operators])

        # Ingest and promote repetitions.
        if repetitions is None:
            raise ValueError("Value for repetitions must be provided when "
                             "using noisy simulation.")
        if not isinstance(repetitions, numbers.Integral):
            # Same placeholder fix as for model_circuit above.
            raise TypeError("repetitions must be a positive integer value."
                            " Given: {}".format(repetitions))
        if repetitions <= 0:
            raise ValueError("Repetitions must be greater than zero.")

        # One repetition count per operator; tiled per-batch in call().
        self._repetitions = tf.constant(
            [[repetitions for _ in range(len(operators))]],
            dtype=tf.dtypes.int32)

        # Ingest differentiator.
        if differentiator is None:
            differentiator = parameter_shift.ParameterShift()

        # Ingest and promote sample based.
        if sample_based is None:
            raise ValueError("Please specify sample_based=False for analytic "
                             "calculations based on monte-carlo trajectories,"
                             " or sample_based=True for measurement based "
                             "noisy estimates.")
        if not isinstance(sample_based, bool):
            raise TypeError("sample_based must be either True or False."
                            " received: {}".format(type(sample_based)))

        if not sample_based:
            self._executor = differentiator.generate_differentiable_op(
                sampled_op=noisy_expectation_op.expectation)
        else:
            self._executor = differentiator.generate_differentiable_op(
                sampled_op=noisy_sampled_expectation_op.sampled_expectation)

        self._append_layer = elementary.AddCircuit()

    @property
    def symbols(self):
        """The symbols that are managed by this layer (in-order).

        Note: `symbols[i]` indicates what symbol name the managed variables
        in this layer map to.
        """
        return [sympy.Symbol(x) for x in self._symbols_list]

    def call(self, inputs):
        """Keras call function."""
        circuit_batch_dim = tf.gather(tf.shape(inputs[0]), 0)

        # Append the (single) model circuit onto every datapoint circuit.
        tiled_up_model = tf.tile(self._circuit, [circuit_batch_dim])
        model_appended = self._append_layer(inputs[0], append=tiled_up_model)

        tiled_up_operators = tf.tile(self._operators, [circuit_batch_dim, 1])
        tiled_up_repetitions = tf.tile(self._repetitions,
                                       [circuit_batch_dim, 1])
        return self._executor(model_appended, self._symbols, inputs[1],
                              tiled_up_operators, tiled_up_repetitions)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Laplace distribution (tf.python.ops.distributions.laplace)."""

import importlib

import numpy as np

from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import laplace as laplace_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging


def try_import(name):  # pylint: disable=invalid-name
  """Import module `name`, returning None (with a warning) if unavailable."""
  module = None
  try:
    module = importlib.import_module(name)
  except ImportError as e:
    tf_logging.warning("Could not import %s: %s" % (name, str(e)))
  return module


# scipy is optional: tests below fall back to shape-only checks when it is
# missing (each test returns early after the `if not stats:` guard).
stats = try_import("scipy.stats")


@test_util.run_all_in_graph_and_eager_modes
class LaplaceTest(test.TestCase):
  """Checks Laplace distribution stats/sampling against scipy references."""

  def testLaplaceShape(self):
    # Batch shape comes from broadcasting loc (5,) against scalar scale.
    loc = constant_op.constant([3.0] * 5)
    scale = constant_op.constant(11.0)
    laplace = laplace_lib.Laplace(loc=loc, scale=scale)

    self.assertEqual(self.evaluate(laplace.batch_shape_tensor()), (5,))
    self.assertEqual(laplace.batch_shape, tensor_shape.TensorShape([5]))
    self.assertAllEqual(self.evaluate(laplace.event_shape_tensor()), [])
    self.assertEqual(laplace.event_shape, tensor_shape.TensorShape([]))

  def testLaplaceLogPDF(self):
    # log_prob/prob against scipy.stats.laplace.logpdf for a scalar batch.
    batch_size = 6
    loc = constant_op.constant([2.0] * batch_size)
    scale = constant_op.constant([3.0] * batch_size)
    loc_v = 2.0
    scale_v = 3.0
    x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
    laplace = laplace_lib.Laplace(loc=loc, scale=scale)
    log_pdf = laplace.log_prob(x)
    self.assertEqual(log_pdf.get_shape(), (6,))
    if not stats:
      return
    expected_log_pdf = stats.laplace.logpdf(x, loc_v, scale=scale_v)
    self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)

    pdf = laplace.prob(x)
    self.assertEqual(pdf.get_shape(), (6,))
    self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))

  def testLaplaceLogPDFMultidimensional(self):
    # Two-column batch: per-component loc/scale, x broadcast down columns.
    batch_size = 6
    loc = constant_op.constant([[2.0, 4.0]] * batch_size)
    scale = constant_op.constant([[3.0, 4.0]] * batch_size)
    loc_v = np.array([2.0, 4.0])
    scale_v = np.array([3.0, 4.0])
    x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
    laplace = laplace_lib.Laplace(loc=loc, scale=scale)
    log_pdf = laplace.log_prob(x)
    log_pdf_values = self.evaluate(log_pdf)
    self.assertEqual(log_pdf.get_shape(), (6, 2))

    pdf = laplace.prob(x)
    pdf_values = self.evaluate(pdf)
    self.assertEqual(pdf.get_shape(), (6, 2))
    if not stats:
      return
    expected_log_pdf = stats.laplace.logpdf(x, loc_v, scale=scale_v)
    self.assertAllClose(log_pdf_values, expected_log_pdf)
    self.assertAllClose(pdf_values, np.exp(expected_log_pdf))

  def testLaplaceLogPDFMultidimensionalBroadcasting(self):
    # Same as above but scale is a scalar, exercising broadcasting.
    batch_size = 6
    loc = constant_op.constant([[2.0, 4.0]] * batch_size)
    scale = constant_op.constant(3.0)
    loc_v = np.array([2.0, 4.0])
    scale_v = 3.0
    x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
    laplace = laplace_lib.Laplace(loc=loc, scale=scale)
    log_pdf = laplace.log_prob(x)
    log_pdf_values = self.evaluate(log_pdf)
    self.assertEqual(log_pdf.get_shape(), (6, 2))

    pdf = laplace.prob(x)
    pdf_values = self.evaluate(pdf)
    self.assertEqual(pdf.get_shape(), (6, 2))
    if not stats:
      return
    expected_log_pdf = stats.laplace.logpdf(x, loc_v, scale=scale_v)
    self.assertAllClose(log_pdf_values, expected_log_pdf)
    self.assertAllClose(pdf_values, np.exp(expected_log_pdf))

  def testLaplaceCDF(self):
    batch_size = 6
    loc = constant_op.constant([2.0] * batch_size)
    scale = constant_op.constant([3.0] * batch_size)
    loc_v = 2.0
    scale_v = 3.0
    x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)

    laplace = laplace_lib.Laplace(loc=loc, scale=scale)

    cdf = laplace.cdf(x)
    self.assertEqual(cdf.get_shape(), (6,))
    if not stats:
      return
    expected_cdf = stats.laplace.cdf(x, loc_v, scale=scale_v)
    self.assertAllClose(self.evaluate(cdf), expected_cdf)

  def testLaplaceLogCDF(self):
    batch_size = 6
    loc = constant_op.constant([2.0] * batch_size)
    scale = constant_op.constant([3.0] * batch_size)
    loc_v = 2.0
    scale_v = 3.0
    # Includes values well below loc to exercise the left tail.
    x = np.array([-2.5, 2.5, -4.0, 0.1, 1.0, 2.0], dtype=np.float32)

    laplace = laplace_lib.Laplace(loc=loc, scale=scale)

    cdf = laplace.log_cdf(x)
    self.assertEqual(cdf.get_shape(), (6,))
    if not stats:
      return
    expected_cdf = stats.laplace.logcdf(x, loc_v, scale=scale_v)
    self.assertAllClose(self.evaluate(cdf), expected_cdf)

  def testLaplaceLogSurvivalFunction(self):
    batch_size = 6
    loc = constant_op.constant([2.0] * batch_size)
    scale = constant_op.constant([3.0] * batch_size)
    loc_v = 2.0
    scale_v = 3.0
    x = np.array([-2.5, 2.5, -4.0, 0.1, 1.0, 2.0], dtype=np.float32)

    laplace = laplace_lib.Laplace(loc=loc, scale=scale)

    sf = laplace.log_survival_function(x)
    self.assertEqual(sf.get_shape(), (6,))
    if not stats:
      return
    expected_sf = stats.laplace.logsf(x, loc_v, scale=scale_v)
    self.assertAllClose(self.evaluate(sf), expected_sf)

  def testLaplaceMean(self):
    loc_v = np.array([1.0, 3.0, 2.5])
    scale_v = np.array([1.0, 4.0, 5.0])
    laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v)
    self.assertEqual(laplace.mean().get_shape(), (3,))
    if not stats:
      return
    expected_means = stats.laplace.mean(loc_v, scale=scale_v)
    self.assertAllClose(self.evaluate(laplace.mean()), expected_means)

  def testLaplaceMode(self):
    # The mode of a Laplace distribution is its loc.
    loc_v = np.array([0.5, 3.0, 2.5])
    scale_v = np.array([1.0, 4.0, 5.0])
    laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v)
    self.assertEqual(laplace.mode().get_shape(), (3,))
    self.assertAllClose(self.evaluate(laplace.mode()), loc_v)

  def testLaplaceVariance(self):
    loc_v = np.array([1.0, 3.0, 2.5])
    scale_v = np.array([1.0, 4.0, 5.0])
    laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v)
    self.assertEqual(laplace.variance().get_shape(), (3,))
    if not stats:
      return
    expected_variances = stats.laplace.var(loc_v, scale=scale_v)
    self.assertAllClose(self.evaluate(laplace.variance()), expected_variances)

  def testLaplaceStd(self):
    loc_v = np.array([1.0, 3.0, 2.5])
    scale_v = np.array([1.0, 4.0, 5.0])
    laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v)
    self.assertEqual(laplace.stddev().get_shape(), (3,))
    if not stats:
      return
    expected_stddev = stats.laplace.std(loc_v, scale=scale_v)
    self.assertAllClose(self.evaluate(laplace.stddev()), expected_stddev)

  def testLaplaceEntropy(self):
    loc_v = np.array([1.0, 3.0, 2.5])
    scale_v = np.array([1.0, 4.0, 5.0])
    laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v)
    self.assertEqual(laplace.entropy().get_shape(), (3,))
    if not stats:
      return
    expected_entropy = stats.laplace.entropy(loc_v, scale=scale_v)
    self.assertAllClose(self.evaluate(laplace.entropy()), expected_entropy)

  def testLaplaceSample(self):
    # Fixed seed keeps sample moments reproducible within the rtol below.
    loc_v = 4.0
    scale_v = 3.0
    loc = constant_op.constant(loc_v)
    scale = constant_op.constant(scale_v)
    n = 100000
    laplace = laplace_lib.Laplace(loc=loc, scale=scale)
    samples = laplace.sample(n, seed=137)
    sample_values = self.evaluate(samples)
    self.assertEqual(samples.get_shape(), (n,))
    self.assertEqual(sample_values.shape, (n,))
    if not stats:
      return
    self.assertAllClose(
        sample_values.mean(),
        stats.laplace.mean(loc_v, scale=scale_v),
        rtol=0.05,
        atol=0.)
    self.assertAllClose(
        sample_values.var(),
        stats.laplace.var(loc_v, scale=scale_v),
        rtol=0.05,
        atol=0.)
    self.assertTrue(self._kstest(loc_v, scale_v, sample_values))

  def testLaplaceFullyReparameterized(self):
    # Gradients of samples w.r.t. loc/scale must exist (reparameterization).
    loc = constant_op.constant(4.0)
    scale = constant_op.constant(3.0)
    with backprop.GradientTape() as tape:
      tape.watch(loc)
      tape.watch(scale)
      laplace = laplace_lib.Laplace(loc=loc, scale=scale)
      samples = laplace.sample(100)
    grad_loc, grad_scale = tape.gradient(samples, [loc, scale])
    self.assertIsNotNone(grad_loc)
    self.assertIsNotNone(grad_scale)

  def testLaplaceSampleMultiDimensional(self):
    loc_v = np.array([np.arange(1, 101, dtype=np.float32)])  # 1 x 100
    scale_v = np.array([np.arange(1, 11, dtype=np.float32)]).T  # 10 x 1
    laplace = laplace_lib.Laplace(loc=loc_v, scale=scale_v)
    n = 10000
    samples = laplace.sample(n, seed=137)
    sample_values = self.evaluate(samples)
    self.assertEqual(samples.get_shape(), (n, 10, 100))
    self.assertEqual(sample_values.shape, (n, 10, 100))
    zeros = np.zeros_like(loc_v + scale_v)  # 10 x 100
    loc_bc = loc_v + zeros
    scale_bc = scale_v + zeros
    if not stats:
      return
    self.assertAllClose(
        sample_values.mean(axis=0),
        stats.laplace.mean(loc_bc, scale=scale_bc),
        rtol=0.35,
        atol=0.)
    self.assertAllClose(
        sample_values.var(axis=0),
        stats.laplace.var(loc_bc, scale=scale_bc),
        rtol=0.105,
        atol=0.0)
    # KS-test each marginal; allow a small fraction of statistical failures.
    fails = 0
    trials = 0
    for ai, a in enumerate(np.reshape(loc_v, [-1])):
      for bi, b in enumerate(np.reshape(scale_v, [-1])):
        s = sample_values[:, bi, ai]
        trials += 1
        fails += 0 if self._kstest(a, b, s) else 1
    self.assertLess(fails, trials * 0.03)

  def _kstest(self, loc, scale, samples):
    # Uses the Kolmogorov-Smirnov test for goodness of fit.
    if not stats:
      return True  # If scipy isn't available, return "True" for passing
    ks, _ = stats.kstest(samples, stats.laplace(loc, scale=scale).cdf)
    # Return True when the test passes.
    return ks < 0.02

  def testLaplacePdfOfSampleMultiDims(self):
    laplace = laplace_lib.Laplace(loc=[7., 11.], scale=[[5.], [6.]])
    num = 50000
    samples = laplace.sample(num, seed=137)
    pdfs = laplace.prob(samples)
    sample_vals, pdf_vals = self.evaluate([samples, pdfs])
    self.assertEqual(samples.get_shape(), (num, 2, 2))
    self.assertEqual(pdfs.get_shape(), (num, 2, 2))
    # Each marginal pdf, integrated over its own samples, should be ~1.
    self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
    self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
    self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
    self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
    if not stats:
      return
    self.assertAllClose(
        stats.laplace.mean(
            [[7., 11.], [7., 11.]], scale=np.array([[5., 5.], [6., 6.]])),
        sample_vals.mean(axis=0),
        rtol=0.05,
        atol=0.)
    self.assertAllClose(
        stats.laplace.var([[7., 11.], [7., 11.]],
                          scale=np.array([[5., 5.], [6., 6.]])),
        sample_vals.var(axis=0),
        rtol=0.05,
        atol=0.)

  def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
    # Trapezoidal integration of the empirical pdf over sorted samples.
    s_p = zip(sample_vals, pdf_vals)
    prev = (0, 0)
    total = 0
    for k in sorted(s_p, key=lambda x: x[0]):
      pair_pdf = (k[1] + prev[1]) / 2
      total += (k[0] - prev[0]) * pair_pdf
      prev = k
    self.assertNear(1., total, err=err)

  def testLaplaceNonPositiveInitializationParamsRaises(self):
    # validate_args=True must reject non-positive scale at evaluation time.
    loc_v = constant_op.constant(0.0, name="loc")
    scale_v = constant_op.constant(-1.0, name="scale")
    with self.assertRaisesOpError("Condition x > 0 did not hold element-wise"):
      laplace = laplace_lib.Laplace(
          loc=loc_v, scale=scale_v, validate_args=True)
      self.evaluate(laplace.mean())
    loc_v = constant_op.constant(1.0, name="loc")
    scale_v = constant_op.constant(0.0, name="scale")
    with self.assertRaisesOpError("Condition x > 0 did not hold element-wise"):
      laplace = laplace_lib.Laplace(
          loc=loc_v, scale=scale_v, validate_args=True)
      self.evaluate(laplace.mean())

  def testLaplaceWithSoftplusScale(self):
    # LaplaceWithSoftplusScale maps raw scale through softplus (so the
    # negative input -1.0 becomes a valid positive scale).
    loc_v = constant_op.constant([0.0, 1.0], name="loc")
    scale_v = constant_op.constant([-1.0, 2.0], name="scale")
    laplace = laplace_lib.LaplaceWithSoftplusScale(loc=loc_v, scale=scale_v)
    self.assertAllClose(
        self.evaluate(nn_ops.softplus(scale_v)), self.evaluate(laplace.scale))
    self.assertAllClose(self.evaluate(loc_v), self.evaluate(laplace.loc))


if __name__ == "__main__":
  test.main()
from ..db.utils import get_cursor, set_cursor
from .testmodels import FieldsWithOptionsModel, EmailModel, DateTimeModel, \
    OrderedModel, BlobModel
from django.db.models import Q
from django.db.utils import DatabaseError
from django.test import TestCase
from django.utils import unittest
from google.appengine.api.datastore import Get, Key
import datetime
import time


class FilterTest(TestCase):
    """Exercises Django ORM filter lookups against the App Engine datastore
    backend: comparison operators, __in, Q objects, slicing, cursors, ranges,
    and the backend's documented limitations (single inequality filter).
    """
    floats = [5.3, 2.6, 9.1, 1.58]
    emails = ['app-engine@scholardocs.com', 'sharingan@uchias.com',
              'rinnengan@sage.de', 'rasengan@naruto.com']
    datetimes = [datetime.datetime(2010, 1, 1, 0, 0, 0, 0),
                 datetime.datetime(2010, 12, 31, 23, 59, 59, 999999),
                 datetime.datetime(2011, 1, 1, 0, 0, 0, 0),
                 datetime.datetime(2013, 7, 28, 22, 30, 20, 50)]

    def setUp(self):
        """Create one OrderedModel / FieldsWithOptionsModel / EmailModel /
        DateTimeModel row per entry in the class-level fixtures."""
        # NOTE(review): the loop variable `float` shadows the builtin; kept
        # as-is because a doc-only update must not rename code.
        for index, (float, email, datetime_value) in enumerate(zip(
                FilterTest.floats, FilterTest.emails, FilterTest.datetimes)):
            # ensure distinct times when saving entities
            time.sleep(0.01)
            self.last_save_time = datetime.datetime.now().time()
            ordered_instance = OrderedModel(priority=index, pk=index + 1)
            ordered_instance.save()
            FieldsWithOptionsModel(floating_point=float,
                                   integer=int(float), email=email,
                                   time=self.last_save_time,
                                   foreign_key=ordered_instance).save()
            EmailModel(email=email).save()
            DateTimeModel(datetime=datetime_value).save()

    def test_startswith(self):
        # __startswith works both on the pk field (email) and a plain field.
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                          email__startswith='r').order_by('email')],
                          ['rasengan@naruto.com', 'rinnengan@sage.de'])
        self.assertEquals([entity.email for entity in
                          EmailModel.objects.filter(
                          email__startswith='r').order_by('email')],
                          ['rasengan@naruto.com', 'rinnengan@sage.de'])

    def test_gt(self):
        # test gt on float
        self.assertEquals([entity.floating_point for entity in
                          FieldsWithOptionsModel.objects.filter(
                          floating_point__gt=3.1).order_by('floating_point')],
                          [5.3, 9.1])

        # test gt on integer
        self.assertEquals([entity.integer for entity in
                          FieldsWithOptionsModel.objects.filter(
                          integer__gt=3).order_by('integer')],
                          [5, 9])

        # test filter on primary_key field
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(email__gt='as').
                          order_by('email')],
                          ['rasengan@naruto.com', 'rinnengan@sage.de',
                           'sharingan@uchias.com', ])

        # test ForeignKeys with id
        self.assertEquals(sorted([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                              foreign_key__gt=2)]),
                          ['rasengan@naruto.com', 'rinnengan@sage.de', ])

        # and with instance
        ordered_instance = OrderedModel.objects.get(priority=1)
        self.assertEquals(sorted([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                              foreign_key__gt=ordered_instance)]),
                          ['rasengan@naruto.com', 'rinnengan@sage.de', ])

    def test_lt(self):
        # test lt on float
        self.assertEquals([entity.floating_point for entity in
                          FieldsWithOptionsModel.objects.filter(
                          floating_point__lt=3.1).order_by('floating_point')],
                          [1.58, 2.6])

        # test lt on integer
        self.assertEquals([entity.integer for entity in
                          FieldsWithOptionsModel.objects.filter(
                          integer__lt=3).order_by('integer')],
                          [1, 2])

        # test filter on primary_key field
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(email__lt='as').
                          order_by('email')], ['app-engine@scholardocs.com', ])

        # filter on datetime
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                          time__lt=self.last_save_time).order_by('time')],
                          ['app-engine@scholardocs.com', 'sharingan@uchias.com',
                           'rinnengan@sage.de', ])

        # test ForeignKeys with id
        self.assertEquals(sorted([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                              foreign_key__lt=3)]),
                          ['app-engine@scholardocs.com',
                           'sharingan@uchias.com'])

        # and with instance
        ordered_instance = OrderedModel.objects.get(priority=2)
        self.assertEquals(sorted([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                              foreign_key__lt=ordered_instance)]),
                          ['app-engine@scholardocs.com',
                           'sharingan@uchias.com'])

    def test_gte(self):
        # test gte on float
        self.assertEquals([entity.floating_point for entity in
                          FieldsWithOptionsModel.objects.filter(
                          floating_point__gte=2.6).order_by('floating_point')],
                          [2.6, 5.3, 9.1])

        # test gte on integer
        self.assertEquals([entity.integer for entity in
                          FieldsWithOptionsModel.objects.filter(
                          integer__gte=2).order_by('integer')],
                          [2, 5, 9])

        # test filter on primary_key field
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                          email__gte='rinnengan@sage.de').order_by('email')],
                          ['rinnengan@sage.de', 'sharingan@uchias.com', ])

    def test_lte(self):
        # test lte on float
        self.assertEquals([entity.floating_point for entity in
                          FieldsWithOptionsModel.objects.filter(
                          floating_point__lte=5.3).order_by('floating_point')],
                          [1.58, 2.6, 5.3])

        # test lte on integer
        self.assertEquals([entity.integer for entity in
                          FieldsWithOptionsModel.objects.filter(
                          integer__lte=5).order_by('integer')],
                          [1, 2, 5])

        # test filter on primary_key field
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                          email__lte='rinnengan@sage.de').order_by('email')],
                          ['app-engine@scholardocs.com', 'rasengan@naruto.com',
                           'rinnengan@sage.de'])

    def test_equals(self):
        # test equality filter on primary_key field
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                          email='rinnengan@sage.de').order_by('email')],
                          ['rinnengan@sage.de'])

    def test_is_null(self):
        self.assertEquals(FieldsWithOptionsModel.objects.filter(
            floating_point__isnull=True).count(), 0)

        # NOTE(review): integer=5.4 passes a float to an integer field; the
        # backend apparently accepts it -- TODO confirm this is intentional.
        FieldsWithOptionsModel(integer=5.4, email='shinra.tensai@sixpaths.com',
                               time=datetime.datetime.now().time()).save()

        self.assertEquals(FieldsWithOptionsModel.objects.filter(
            floating_point__isnull=True).count(), 1)

        # XXX: These filters will not work because of a Django bug
        # self.assertEquals(FieldsWithOptionsModel.objects.filter(
        #     foreign_key=None).count(), 1)

        # (it uses left outer joins if checked against isnull
        # self.assertEquals(FieldsWithOptionsModel.objects.filter(
        #     foreign_key__isnull=True).count(), 1)

    def test_exclude(self):
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.all().exclude(
                          floating_point__lt=9.1).order_by('floating_point')],
                          ['rinnengan@sage.de', ])

        # test exclude with foreignKey
        ordered_instance = OrderedModel.objects.get(priority=1)
        self.assertEquals(sorted([entity.email for entity in
                          FieldsWithOptionsModel.objects.all().exclude(
                              foreign_key__gt=ordered_instance)]),
                          ['app-engine@scholardocs.com',
                           'sharingan@uchias.com', ])

    def test_exclude_pk(self):
        self.assertEquals([entity.pk for entity in
                           OrderedModel.objects.exclude(pk__in=[2, 3])
                           .order_by('pk')],
                          [1, 4])

    def test_chained_filter(self):
        # additionally tests count :)
        self.assertEquals(FieldsWithOptionsModel.objects.filter(
            floating_point__lt=5.3, floating_point__gt=2.6).
            count(), 0)

        # test across multiple columns. On app engine only one filter is
        # allowed to be an inequality filter
        self.assertEquals([(entity.floating_point, entity.integer) for
                          entity in FieldsWithOptionsModel.objects.filter(
                          floating_point__lte=5.3, integer=2).order_by(
                          'floating_point')], [(2.6, 2), ])

        # test multiple filters including the primary_key field
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                          email__gte='rinnengan@sage.de', integer=2).order_by(
                          'email')], ['sharingan@uchias.com', ])

        # test in filter on primary key with another arbitrary filter
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                          email__in=['rinnengan@sage.de',
                                     'sharingan@uchias.com'],
                          integer__gt=2).order_by(
                          'integer')], ['rinnengan@sage.de', ])

        # Test exceptions

        # test multiple filters exception when filtered and not ordered against
        # the first filter
        self.assertRaises(DatabaseError, lambda:
                          FieldsWithOptionsModel.objects.filter(
                          email__gte='rinnengan@sage.de',
                          floating_point=5.3).order_by(
                          'floating_point')[0])

        # test exception if filtered across multiple columns with inequality
        # filter
        self.assertRaises(DatabaseError,
                          FieldsWithOptionsModel.objects.filter(
                          floating_point__lte=5.3, integer__gte=2).order_by(
                          'floating_point').get)

        # test exception if filtered across multiple columns with inequality
        # filter with exclude
        self.assertRaises(DatabaseError,
                          FieldsWithOptionsModel.objects.filter(
                          email__lte='rinnengan@sage.de').exclude(
                          floating_point__lt=9.1).order_by('email').get)

        self.assertRaises(DatabaseError, lambda:
                          FieldsWithOptionsModel.objects.all().exclude(
                          floating_point__lt=9.1).order_by('email')[0])

        # TODO: Maybe check all possible exceptions

    def test_slicing(self):
        # test slicing on filter with primary_key
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                          email__lte='rinnengan@sage.de').order_by(
                          'email')[:2]],
                          ['app-engine@scholardocs.com',
                           'rasengan@naruto.com', ])

        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                          email__lte='rinnengan@sage.de').order_by(
                          'email')[1:2]],
                          ['rasengan@naruto.com', ])

        # test on non pk field
        self.assertEquals([entity.integer for entity in
                          FieldsWithOptionsModel.objects.all().order_by(
                          'integer')[:2]],
                          [1, 2, ])

        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.all().order_by(
                          'email')[::2]],
                          ['app-engine@scholardocs.com',
                           'rinnengan@sage.de', ])

    def test_cursor(self):
        """Walk the whole result set one entity at a time via datastore
        cursors, then verify the final cursor yields an empty page."""
        results = list(FieldsWithOptionsModel.objects.all())
        cursor = None
        for item in results:
            query = FieldsWithOptionsModel.objects.all()[:1]
            if cursor is not None:
                query = set_cursor(query, cursor)
            next = query[0]
            self.assertEqual(next.pk, item.pk)
            cursor = get_cursor(query)
        query = set_cursor(FieldsWithOptionsModel.objects.all(), cursor)
        self.assertEqual(list(query[:1]), [])

    def test_Q_objects(self):
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                          Q(email__lte='rinnengan@sage.de')).order_by(
                          'email')][:2],
                          ['app-engine@scholardocs.com',
                           'rasengan@naruto.com', ])

        self.assertEquals([entity.integer for entity in
                          FieldsWithOptionsModel.objects.exclude(
                          Q(integer__lt=5) | Q(integer__gte=9)).order_by(
                          'integer')], [5, ])

        # OR-ed Q trees combined with another filter are unsupported.
        self.assertRaises(TypeError, FieldsWithOptionsModel.objects.filter(
            Q(floating_point=9.1), Q(integer=9) | Q(integer=2)))

    def test_pk_in(self):
        # test pk__in with field name email
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                          email__in=['app-engine@scholardocs.com',
                                     'rasengan@naruto.com'])],
                          ['app-engine@scholardocs.com',
                           'rasengan@naruto.com'])

    def test_in(self):
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                          floating_point__in=[5.3, 2.6, 1.58]).filter(
                          integer__in=[1, 5, 9])],
                          ['app-engine@scholardocs.com',
                           'rasengan@naruto.com'])

    def test_in_with_pk_in(self):
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                          floating_point__in=[5.3, 2.6, 1.58]).filter(
                          email__in=['app-engine@scholardocs.com',
                                     'rasengan@naruto.com'])],
                          ['app-engine@scholardocs.com',
                           'rasengan@naruto.com'])

    def test_inequality(self):
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.exclude(
                          floating_point=5.3).filter(
                          integer__in=[1, 5, 9])],
                          ['rasengan@naruto.com', 'rinnengan@sage.de'])

    def test_values(self):
        # test values()
        # NOTE: 'pk' on this model is the email field, hence email strings.
        self.assertEquals([entity['pk'] for entity in
                          FieldsWithOptionsModel.objects.filter(
                              integer__gt=3).
                          order_by('integer').values('pk')],
                          ['app-engine@scholardocs.com',
                           'rinnengan@sage.de'])

        self.assertEquals(FieldsWithOptionsModel.objects.filter(
                              integer__gt=3).
                          order_by('integer').values('pk').count(), 2)

        # these queries first fetch the whole entity and then only return the
        # desired fields selected in .values
        self.assertEquals([entity['integer'] for entity in
                          FieldsWithOptionsModel.objects.filter(
                          email__startswith='r').order_by('email').values(
                          'integer')], [1, 9])

        self.assertEquals([entity['floating_point'] for entity in
                          FieldsWithOptionsModel.objects.filter(
                              integer__gt=3).
                          order_by('integer').values('floating_point')],
                          [5.3, 9.1])

        # test values_list
        self.assertEquals([entity[0] for entity in
                          FieldsWithOptionsModel.objects.filter(
                              integer__gt=3).
                          order_by('integer').values_list('pk')],
                          ['app-engine@scholardocs.com',
                           'rinnengan@sage.de'])

    def test_range(self):
        # test range on float
        self.assertEquals([entity.floating_point for entity in
                          FieldsWithOptionsModel.objects.filter(
                          floating_point__range=(2.6, 9.1)).
                          order_by('floating_point')], [2.6, 5.3, 9.1, ])

        # test range on pk
        self.assertEquals([entity.pk for entity in
                          FieldsWithOptionsModel.objects.filter(
                          pk__range=('app-engine@scholardocs.com',
                                     'rinnengan@sage.de')).
                          order_by('pk')],
                          ['app-engine@scholardocs.com',
                           'rasengan@naruto.com', 'rinnengan@sage.de', ])

        # test range on date/datetime objects
        # NOTE(review): minute - 1 underflows if setUp ran at minute 0 --
        # TODO confirm acceptable for this test environment.
        start_time = datetime.time(self.last_save_time.hour,
                                   self.last_save_time.minute - 1,
                                   self.last_save_time.second,
                                   self.last_save_time.microsecond)
        self.assertEquals([entity.email for entity in
                          FieldsWithOptionsModel.objects.filter(
                          time__range=(start_time,
                                       self.last_save_time)).order_by('time')],
                          ['app-engine@scholardocs.com',
                           'sharingan@uchias.com', 'rinnengan@sage.de',
                           'rasengan@naruto.com', ])

    def test_date(self):
        # test year on date range boundaries
        self.assertEquals([entity.datetime for entity in
                          DateTimeModel.objects.filter(
                          datetime__year=2010).order_by('datetime')],
                          [datetime.datetime(2010, 1, 1, 0, 0, 0, 0),
                           datetime.datetime(2010, 12, 31, 23, 59, 59,
                                             999999), ])

        # test year on non boundary date
        self.assertEquals([entity.datetime for entity in
                          DateTimeModel.objects.filter(
                          datetime__year=2013).order_by('datetime')],
                          [datetime.datetime(2013, 7, 28, 22, 30, 20, 50), ])

    def test_auto_now(self):
        # auto_now fields must be refreshed on every save.
        time.sleep(0.1)
        entity = DateTimeModel.objects.all()[0]
        auto_now = entity.datetime_auto_now
        entity.save()
        entity = DateTimeModel.objects.get(pk=entity.pk)
        self.assertNotEqual(auto_now, entity.datetime_auto_now)

    def test_auto_now_add(self):
        # auto_now_add fields must be written only on first save.
        time.sleep(0.1)
        entity = DateTimeModel.objects.all()[0]
        auto_now_add = entity.datetime_auto_now_add
        entity.save()
        entity = DateTimeModel.objects.get(pk=entity.pk)
        self.assertEqual(auto_now_add, entity.datetime_auto_now_add)

    def test_latest(self):
        self.assertEquals(FieldsWithOptionsModel.objects.latest(
            'time').floating_point, 1.58)

    def test_blob(self):
        # Round-trip a blob through the model and verify the raw datastore
        # entity holds the same bytes.
        x = BlobModel(data='lalala')
        x.full_clean()
        x.save()
        e = Get(Key.from_path(BlobModel._meta.db_table, x.pk))
        self.assertEqual(e['data'], x.data)
        x = BlobModel.objects.all()[0]
        self.assertEqual(e['data'], x.data)
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import os
from os import kill
from signal import SIGTERM
from subprocess import Popen
from time import sleep
from uuid import uuid4

import eventlet
import sqlite3

from swift.common import client, direct_client
from swift.common.utils import hash_path, readconf
from test.probe.common import get_to_final_state, kill_pids, reset_environment


class TestContainerFailures(unittest.TestCase):
    """Probe tests for container-server failure and recovery scenarios.

    Each test kills one or more container-server replicas, performs client
    operations while they are down, restarts them, and checks eventual
    consistency after replication (get_to_final_state).
    """

    def setUp(self):
        # reset_environment spins up a fresh SAIO cluster and returns the
        # process map, rings, and an authenticated account/url/token.
        self.pids, self.port2server, self.account_ring, self.container_ring, \
            self.object_ring, self.url, self.token, self.account = \
            reset_environment()

    def tearDown(self):
        kill_pids(self.pids)

    def test_first_node_fail(self):
        """Kill the FIRST container replica and verify deletes/puts settle."""
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)
        client.delete_object(self.url, self.token, container, object1)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.pids[self.port2server[cnodes[0]['port']]] = \
            Popen(['swift-container-server',
                   '/etc/swift/container-server/%d.conf' %
                   ((cnodes[0]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # This okay because the first node hasn't got the update that the
        # object was deleted yet.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated happens to be the first
        # account server, this'll pass, otherwise the first account server
        # will serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])
        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # First node still doesn't know object1 was deleted yet; this is okay.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # And, so our object2 should still exist and object1's delete should
        # have finalized.
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])

    def test_second_node_fail(self):
        """Kill the SECOND container replica and verify the same flow."""
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        kill(self.pids[self.port2server[cnodes[1]['port']]], SIGTERM)
        client.delete_object(self.url, self.token, container, object1)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.pids[self.port2server[cnodes[1]['port']]] = \
            Popen(['swift-container-server',
                   '/etc/swift/container-server/%d.conf' %
                   ((cnodes[1]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated happens to be the first
        # account server, this'll pass, otherwise the first account server
        # will serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])
        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        self.assert_(object1 not in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # And, so our object2 should still exist and object1's delete should
        # have finalized.
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])

    def test_first_two_nodes_fail(self):
        """Kill the first two replicas; container delete must 503."""
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        for x in xrange(2):
            kill(self.pids[self.port2server[cnodes[x]['port']]], SIGTERM)
        client.delete_object(self.url, self.token, container, object1)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        for x in xrange(2):
            self.pids[self.port2server[cnodes[x]['port']]] = \
                Popen(['swift-container-server',
                       '/etc/swift/container-server/%d.conf' %
                       ((cnodes[x]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # This okay because the first node hasn't got the update that the
        # object was deleted yet.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # This fails because all three nodes have to indicate deletion before
        # we tell the user it worked. Since the first node 409s (it hasn't got
        # the update that the object was deleted yet), the whole must 503
        # (until every is synced up, then the delete would work).
        exc = None
        try:
            client.delete_container(self.url, self.token, container)
        except client.ClientException as err:
            exc = err
        self.assert_(exc)
        # BUG FIX: the original `self.assert_(exc.http_status, 503)` passed
        # 503 as the assertion *message*, so it never checked the status
        # value; assertEqual performs the intended comparison.
        self.assertEqual(exc.http_status, 503)
        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated happens to be the first
        # account server, this'll pass, otherwise the first account server
        # will serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])
        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # First node still doesn't know object1 was deleted yet; this is okay.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # And, so our object2 should still exist and object1's delete should
        # have finalized.
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])

    def test_last_two_nodes_fail(self):
        """Kill the last two replicas; container delete must 503."""
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        for x in (1, 2):
            kill(self.pids[self.port2server[cnodes[x]['port']]], SIGTERM)
        client.delete_object(self.url, self.token, container, object1)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        for x in (1, 2):
            self.pids[self.port2server[cnodes[x]['port']]] = \
                Popen(['swift-container-server',
                       '/etc/swift/container-server/%d.conf' %
                       ((cnodes[x]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # This fails because all three nodes have to indicate deletion before
        # we tell the user it worked. Since the first node 409s (it hasn't got
        # the update that the object was deleted yet), the whole must 503
        # (until every is synced up, then the delete would work).
        exc = None
        try:
            client.delete_container(self.url, self.token, container)
        except client.ClientException as err:
            exc = err
        self.assert_(exc)
        # BUG FIX: assertEqual instead of assert_(..., 503) -- see
        # test_first_two_nodes_fail.
        self.assertEqual(exc.http_status, 503)
        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated happens to be the first
        # account server, this'll pass, otherwise the first account server
        # will serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])
        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        self.assert_(object1 not in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # And, so our object2 should still exist and object1's delete should
        # have finalized.
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])

    def _get_db_file_path(self, obj_dir):
        """Return the newest *.db file in obj_dir, or None if there is none."""
        files = sorted(os.listdir(obj_dir), reverse=True)
        for file in files:
            if file.endswith('db'):
                return os.path.join(obj_dir, file)

    def _get_container_db_files(self, container):
        """Return the on-disk container DB path for each replica node."""
        opart, onodes = self.container_ring.get_nodes(self.account, container)
        onode = onodes[0]
        db_files = []
        for onode in onodes:
            node_id = (onode['port'] - 6000) / 10
            device = onode['device']
            hash_str = hash_path(self.account, container)
            server_conf = readconf('/etc/swift/container-server/%s.conf' %
                                   node_id)
            devices = server_conf['app:container-server']['devices']
            obj_dir = '%s/%s/containers/%s/%s/%s/' % (devices,
                                                      device, opart,
                                                      hash_str[-3:], hash_str)
            db_files.append(self._get_db_file_path(obj_dir))
        return db_files

    def test_locked_container_dbs(self):
        """Locked replica DBs must yield a 503 on delete, never a hang."""

        def run_test(num_locks, catch_503):
            container = 'container-%s' % uuid4()
            client.put_container(self.url, self.token, container)
            db_files = self._get_container_db_files(container)
            db_conns = []
            # Hold exclusive sqlite locks on num_locks replica DBs so the
            # container servers cannot write to them.
            for i in range(num_locks):
                db_conn = sqlite3.connect(db_files[i])
                db_conn.execute('begin exclusive transaction')
                db_conns.append(db_conn)
            if catch_503:
                try:
                    client.delete_container(self.url, self.token, container)
                except client.ClientException as e:
                    self.assertEquals(e.http_status, 503)
            else:
                client.delete_container(self.url, self.token, container)

        pool = eventlet.GreenPool()
        try:
            with eventlet.Timeout(15):
                pool.spawn(run_test, 1, False)
                pool.spawn(run_test, 2, True)
                pool.spawn(run_test, 3, True)
                pool.waitall()
        except eventlet.Timeout as e:
            raise Exception(
                "The server did not return a 503 on container db locks, "
                "it just hangs: %s" % e)


if __name__ == '__main__':
    unittest.main()
# Copyright 2015 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for iRMC Deploy Driver
"""

import os
import shutil
import tempfile

import mock
from oslo_config import cfg
import six

from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common import images
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.irmc import common as irmc_common
from ironic.drivers.modules.irmc import deploy as irmc_deploy
from ironic.drivers.modules import iscsi_deploy
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils

if six.PY3:
    # Python 3 has no `file` builtin; tests that reference it get BytesIO.
    import io
    file = io.BytesIO

INFO_DICT = db_utils.get_test_irmc_info()
CONF = cfg.CONF


class IRMCDeployPrivateMethodsTestCase(db_base.DbTestCase):
    """Unit tests for the private helpers of the iRMC deploy module."""

    def setUp(self):
        # Patch out the share-mount check for the whole test run.
        irmc_deploy._check_share_fs_mounted_patcher.start()
        self.addCleanup(irmc_deploy._check_share_fs_mounted_patcher.stop)
        super(IRMCDeployPrivateMethodsTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver='iscsi_irmc')
        self.node = obj_utils.create_test_node(
            self.context, driver='iscsi_irmc', driver_info=INFO_DICT)

        CONF.irmc.remote_image_share_root = '/remote_image_share_root'
        CONF.irmc.remote_image_server = '10.20.30.40'
        CONF.irmc.remote_image_share_type = 'NFS'
        CONF.irmc.remote_image_share_name = 'share'
        CONF.irmc.remote_image_user_name = 'admin'
        CONF.irmc.remote_image_user_password = 'admin0'
        CONF.irmc.remote_image_user_domain = 'local'

    @mock.patch.object(os.path, 'isdir', spec_set=True, autospec=True)
    def test__parse_config_option(self, isdir_mock):
        """Valid config: share root exists, share type supported."""
        isdir_mock.return_value = True

        result = irmc_deploy._parse_config_option()

        isdir_mock.assert_called_once_with('/remote_image_share_root')
        self.assertIsNone(result)

    @mock.patch.object(os.path, 'isdir', spec_set=True, autospec=True)
    def test__parse_config_option_non_existed_root(self, isdir_mock):
        """Missing share root directory must raise InvalidParameterValue."""
        CONF.irmc.remote_image_share_root = '/non_existed_root'
        isdir_mock.return_value = False

        self.assertRaises(exception.InvalidParameterValue,
                          irmc_deploy._parse_config_option)
        isdir_mock.assert_called_once_with('/non_existed_root')

    @mock.patch.object(os.path, 'isdir', spec_set=True, autospec=True)
    def test__parse_config_option_wrong_share_type(self, isdir_mock):
        """Unsupported share type must raise InvalidParameterValue."""
        CONF.irmc.remote_image_share_type = 'NTFS'
        isdir_mock.return_value = True

        self.assertRaises(exception.InvalidParameterValue,
                          irmc_deploy._parse_config_option)
        isdir_mock.assert_called_once_with('/remote_image_share_root')

    @mock.patch.object(os.path, 'isfile', spec_set=True, autospec=True)
    def test__parse_driver_info_in_share(self, isfile_mock):
        """With required 'irmc_deploy_iso' in share."""
        isfile_mock.return_value = True
        self.node.driver_info['irmc_deploy_iso'] = 'deploy.iso'
        driver_info_expected = {'irmc_deploy_iso': 'deploy.iso'}

        driver_info_actual = irmc_deploy._parse_driver_info(self.node)

        isfile_mock.assert_called_once_with(
            '/remote_image_share_root/deploy.iso')
        self.assertEqual(driver_info_expected, driver_info_actual)

    @mock.patch.object(service_utils, 'is_image_href_ordinary_file_name',
                       spec_set=True, autospec=True)
    def test__parse_driver_info_not_in_share(
            self, is_image_href_ordinary_file_name_mock):
        """With required 'irmc_deploy_iso' not in share."""
        self.node.driver_info[
            'irmc_deploy_iso'] = 'bc784057-a140-4130-add3-ef890457e6b3'
        driver_info_expected = {'irmc_deploy_iso':
                                'bc784057-a140-4130-add3-ef890457e6b3'}
        is_image_href_ordinary_file_name_mock.return_value = False

        driver_info_actual = irmc_deploy._parse_driver_info(self.node)

        self.assertEqual(driver_info_expected, driver_info_actual)

    @mock.patch.object(os.path, 'isfile', spec_set=True, autospec=True)
    def test__parse_driver_info_with_deploy_iso_invalid(self, isfile_mock):
        """With required 'irmc_deploy_iso' non existed."""
        isfile_mock.return_value = False

        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.driver_info['irmc_deploy_iso'] = 'deploy.iso'
            error_msg = (_("Deploy ISO file, %(deploy_iso)s, "
                           "not found for node: %(node)s.") %
                         {'deploy_iso': '/remote_image_share_root/deploy.iso',
                          'node': task.node.uuid})

            e = self.assertRaises(exception.InvalidParameterValue,
                                  irmc_deploy._parse_driver_info,
                                  task.node)
            self.assertEqual(error_msg, str(e))

    def test__parse_driver_info_with_deploy_iso_missing(self):
        """With required 'irmc_deploy_iso' empty."""
        self.node.driver_info['irmc_deploy_iso'] = None

        error_msg = ("Error validating iRMC virtual media deploy. Some"
                     " parameters were missing in node's driver_info."
                     " Missing are: ['irmc_deploy_iso']")
        e = self.assertRaises(exception.MissingParameterValue,
                              irmc_deploy._parse_driver_info,
                              self.node)
        self.assertEqual(error_msg, str(e))

    def test__parse_instance_info_with_boot_iso_file_name_ok(self):
        """With optional 'irmc_boot_iso' file name."""
        CONF.irmc.remote_image_share_root = '/etc'
        self.node.instance_info['irmc_boot_iso'] = 'hosts'
        instance_info_expected = {'irmc_boot_iso': 'hosts'}
        instance_info_actual = irmc_deploy._parse_instance_info(self.node)

        self.assertEqual(instance_info_expected, instance_info_actual)

    def test__parse_instance_info_without_boot_iso_ok(self):
        """With optional no 'irmc_boot_iso' file name."""
        CONF.irmc.remote_image_share_root = '/etc'

        self.node.instance_info['irmc_boot_iso'] = None
        instance_info_expected = {}
        instance_info_actual = irmc_deploy._parse_instance_info(self.node)

        self.assertEqual(instance_info_expected, instance_info_actual)

    def test__parse_instance_info_with_boot_iso_uuid_ok(self):
        """With optional 'irmc_boot_iso' glance uuid."""
        self.node.instance_info[
            'irmc_boot_iso'] = 'bc784057-a140-4130-add3-ef890457e6b3'
        instance_info_expected = {'irmc_boot_iso':
                                  'bc784057-a140-4130-add3-ef890457e6b3'}
        instance_info_actual = irmc_deploy._parse_instance_info(self.node)

        self.assertEqual(instance_info_expected, instance_info_actual)

    def test__parse_instance_info_with_boot_iso_glance_ok(self):
        """With optional 'irmc_boot_iso' glance url."""
        self.node.instance_info['irmc_boot_iso'] = (
            'glance://bc784057-a140-4130-add3-ef890457e6b3')
        instance_info_expected = {
            'irmc_boot_iso': 'glance://bc784057-a140-4130-add3-ef890457e6b3',
        }
        instance_info_actual = irmc_deploy._parse_instance_info(self.node)

        self.assertEqual(instance_info_expected, instance_info_actual)

    def test__parse_instance_info_with_boot_iso_http_ok(self):
        """With optional 'irmc_boot_iso' http url."""
        self.node.driver_info[
            'irmc_deploy_iso'] = 'http://irmc_boot_iso'
        driver_info_expected = {'irmc_deploy_iso': 'http://irmc_boot_iso'}
        # NOTE(review): this method continues beyond the end of this chunk;
        # the remainder is not visible here.
driver_info_actual = irmc_deploy._parse_driver_info(self.node) self.assertEqual(driver_info_expected, driver_info_actual) def test__parse_instance_info_with_boot_iso_https_ok(self): """With optional 'irmc_boot_iso' https url.""" self.node.instance_info[ 'irmc_boot_iso'] = 'https://irmc_boot_iso' instance_info_expected = {'irmc_boot_iso': 'https://irmc_boot_iso'} instance_info_actual = irmc_deploy._parse_instance_info(self.node) self.assertEqual(instance_info_expected, instance_info_actual) def test__parse_instance_info_with_boot_iso_file_url_ok(self): """With optional 'irmc_boot_iso' file url.""" self.node.instance_info[ 'irmc_boot_iso'] = 'file://irmc_boot_iso' instance_info_expected = {'irmc_boot_iso': 'file://irmc_boot_iso'} instance_info_actual = irmc_deploy._parse_instance_info(self.node) self.assertEqual(instance_info_expected, instance_info_actual) @mock.patch.object(os.path, 'isfile', spec_set=True, autospec=True) def test__parse_instance_info_with_boot_iso_invalid(self, isfile_mock): CONF.irmc.remote_image_share_root = '/etc' isfile_mock.return_value = False with task_manager.acquire(self.context, self.node.uuid) as task: task.node.instance_info['irmc_boot_iso'] = 'hosts~non~existed' error_msg = (_("Boot ISO file, %(boot_iso)s, " "not found for node: %(node)s.") % {'boot_iso': '/etc/hosts~non~existed', 'node': task.node.uuid}) e = self.assertRaises(exception.InvalidParameterValue, irmc_deploy._parse_instance_info, task.node) self.assertEqual(error_msg, str(e)) @mock.patch.object(iscsi_deploy, 'parse_instance_info', spec_set=True, autospec=True) @mock.patch('os.path.isfile', autospec=True) def test__parse_deploy_info_ok(self, mock_isfile, instance_info_mock): CONF.irmc.remote_image_share_root = '/etc' instance_info_mock.return_value = {'a': 'b'} driver_info_expected = {'a': 'b', 'irmc_deploy_iso': 'hosts', 'irmc_boot_iso': 'fstab'} with task_manager.acquire(self.context, self.node.uuid) as task: task.node.driver_info['irmc_deploy_iso'] = 'hosts' 
task.node.instance_info['irmc_boot_iso'] = 'fstab' driver_info_actual = irmc_deploy._parse_deploy_info(task.node) self.assertEqual(driver_info_expected, driver_info_actual) boot_iso_path = os.path.join( CONF.irmc.remote_image_share_root, task.node.instance_info['irmc_boot_iso'] ) mock_isfile.assert_any_call(boot_iso_path) @mock.patch.object(manager_utils, 'node_power_action', spec_set=True, autospec=True) @mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, 'setup_vmedia_for_boot', spec_set=True, autospec=True) @mock.patch.object(images, 'fetch', spec_set=True, autospec=True) def test__reboot_into_deploy_iso_with_file(self, fetch_mock, setup_vmedia_mock, set_boot_device_mock, node_power_action_mock): with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: task.node.driver_info['irmc_deploy_iso'] = 'deploy_iso_filename' ramdisk_opts = {'a': 'b'} irmc_deploy._reboot_into_deploy_iso(task, ramdisk_opts) self.assertFalse(fetch_mock.called) setup_vmedia_mock.assert_called_once_with( task, 'deploy_iso_filename', ramdisk_opts) set_boot_device_mock.assert_called_once_with(task, boot_devices.CDROM) node_power_action_mock.assert_called_once_with(task, states.REBOOT) @mock.patch.object(manager_utils, 'node_power_action', spec_set=True, autospec=True) @mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, 'setup_vmedia_for_boot', spec_set=True, autospec=True) @mock.patch.object(images, 'fetch', spec_set=True, autospec=True) @mock.patch.object(service_utils, 'is_image_href_ordinary_file_name', spec_set=True, autospec=True) def test__reboot_into_deploy_iso_with_image_service( self, is_image_href_ordinary_file_name_mock, fetch_mock, setup_vmedia_mock, set_boot_device_mock, node_power_action_mock): CONF.irmc.remote_image_share_root = '/' is_image_href_ordinary_file_name_mock.return_value = False with 
task_manager.acquire(self.context, self.node.uuid, shared=False) as task: task.node.driver_info['irmc_deploy_iso'] = 'glance://deploy_iso' ramdisk_opts = {'a': 'b'} irmc_deploy._reboot_into_deploy_iso(task, ramdisk_opts) fetch_mock.assert_called_once_with( task.context, 'glance://deploy_iso', "/deploy-%s.iso" % self.node.uuid) setup_vmedia_mock.assert_called_once_with( task, "deploy-%s.iso" % self.node.uuid, ramdisk_opts) set_boot_device_mock.assert_called_once_with( task, boot_devices.CDROM) node_power_action_mock.assert_called_once_with( task, states.REBOOT) def test__get_deploy_iso_name(self): actual = irmc_deploy._get_deploy_iso_name(self.node) expected = "deploy-%s.iso" % self.node.uuid self.assertEqual(expected, actual) def test__get_boot_iso_name(self): actual = irmc_deploy._get_boot_iso_name(self.node) expected = "boot-%s.iso" % self.node.uuid self.assertEqual(expected, actual) @mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True) @mock.patch.object(deploy_utils, 'get_boot_mode_for_deploy', spec_set=True, autospec=True) @mock.patch.object(images, 'get_image_properties', spec_set=True, autospec=True) @mock.patch.object(images, 'fetch', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_parse_deploy_info', spec_set=True, autospec=True) def test__prepare_boot_iso_file(self, deploy_info_mock, fetch_mock, image_props_mock, boot_mode_mock, create_boot_iso_mock): deploy_info_mock.return_value = {'irmc_boot_iso': 'irmc_boot.iso'} with task_manager.acquire(self.context, self.node.uuid) as task: irmc_deploy._prepare_boot_iso(task, 'root-uuid') deploy_info_mock.assert_called_once_with(task.node) self.assertFalse(fetch_mock.called) self.assertFalse(image_props_mock.called) self.assertFalse(boot_mode_mock.called) self.assertFalse(create_boot_iso_mock.called) task.node.refresh() self.assertEqual('irmc_boot.iso', task.node.driver_internal_info['irmc_boot_iso']) @mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True) 
@mock.patch.object(deploy_utils, 'get_boot_mode_for_deploy', spec_set=True, autospec=True) @mock.patch.object(images, 'get_image_properties', spec_set=True, autospec=True) @mock.patch.object(images, 'fetch', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_parse_deploy_info', spec_set=True, autospec=True) @mock.patch.object(service_utils, 'is_image_href_ordinary_file_name', spec_set=True, autospec=True) def test__prepare_boot_iso_fetch_ok(self, is_image_href_ordinary_file_name_mock, deploy_info_mock, fetch_mock, image_props_mock, boot_mode_mock, create_boot_iso_mock): CONF.irmc.remote_image_share_root = '/' image = '733d1c44-a2ea-414b-aca7-69decf20d810' is_image_href_ordinary_file_name_mock.return_value = False deploy_info_mock.return_value = {'irmc_boot_iso': image} with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: task.node.instance_info['irmc_boot_iso'] = image irmc_deploy._prepare_boot_iso(task, 'root-uuid') deploy_info_mock.assert_called_once_with(task.node) fetch_mock.assert_called_once_with( task.context, image, "/boot-%s.iso" % self.node.uuid) self.assertFalse(image_props_mock.called) self.assertFalse(boot_mode_mock.called) self.assertFalse(create_boot_iso_mock.called) task.node.refresh() self.assertEqual("boot-%s.iso" % self.node.uuid, task.node.driver_internal_info['irmc_boot_iso']) @mock.patch.object(images, 'create_boot_iso', spec_set=True, autospec=True) @mock.patch.object(deploy_utils, 'get_boot_mode_for_deploy', spec_set=True, autospec=True) @mock.patch.object(images, 'get_image_properties', spec_set=True, autospec=True) @mock.patch.object(images, 'fetch', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_parse_deploy_info', spec_set=True, autospec=True) def test__prepare_boot_iso_create_ok(self, deploy_info_mock, fetch_mock, image_props_mock, boot_mode_mock, create_boot_iso_mock): CONF.pxe.pxe_append_params = 'kernel-params' deploy_info_mock.return_value = {'image_source': 'image-uuid'} 
image_props_mock.return_value = {'kernel_id': 'kernel_uuid', 'ramdisk_id': 'ramdisk_uuid'} CONF.irmc.remote_image_share_name = '/remote_image_share_root' boot_mode_mock.return_value = 'uefi' with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: irmc_deploy._prepare_boot_iso(task, 'root-uuid') self.assertFalse(fetch_mock.called) deploy_info_mock.assert_called_once_with(task.node) image_props_mock.assert_called_once_with( task.context, 'image-uuid', ['kernel_id', 'ramdisk_id']) create_boot_iso_mock.assert_called_once_with( task.context, '/remote_image_share_root/' + "boot-%s.iso" % self.node.uuid, 'kernel_uuid', 'ramdisk_uuid', 'file:///remote_image_share_root/' + "deploy-%s.iso" % self.node.uuid, 'root-uuid', 'kernel-params', 'uefi') task.node.refresh() self.assertEqual("boot-%s.iso" % self.node.uuid, task.node.driver_internal_info['irmc_boot_iso']) def test__get_floppy_image_name(self): actual = irmc_deploy._get_floppy_image_name(self.node) expected = "image-%s.img" % self.node.uuid self.assertEqual(expected, actual) @mock.patch.object(shutil, 'copyfile', spec_set=True, autospec=True) @mock.patch.object(images, 'create_vfat_image', spec_set=True, autospec=True) @mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True, autospec=True) def test__prepare_floppy_image(self, tempfile_mock, create_vfat_image_mock, copyfile_mock): mock_image_file_handle = mock.MagicMock(spec=file) mock_image_file_obj = mock.MagicMock() mock_image_file_obj.name = 'image-tmp-file' mock_image_file_handle.__enter__.return_value = mock_image_file_obj tempfile_mock.side_effect = iter([mock_image_file_handle]) deploy_args = {'arg1': 'val1', 'arg2': 'val2'} CONF.irmc.remote_image_share_name = '/remote_image_share_root' with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: irmc_deploy._prepare_floppy_image(task, deploy_args) create_vfat_image_mock.assert_called_once_with( 'image-tmp-file', parameters=deploy_args) 
copyfile_mock.assert_called_once_with( 'image-tmp-file', '/remote_image_share_root/' + "image-%s.img" % self.node.uuid) @mock.patch.object(shutil, 'copyfile', spec_set=True, autospec=True) @mock.patch.object(images, 'create_vfat_image', spec_set=True, autospec=True) @mock.patch.object(tempfile, 'NamedTemporaryFile', spec_set=True, autospec=True) def test__prepare_floppy_image_exception(self, tempfile_mock, create_vfat_image_mock, copyfile_mock): mock_image_file_handle = mock.MagicMock(spec=file) mock_image_file_obj = mock.MagicMock() mock_image_file_obj.name = 'image-tmp-file' mock_image_file_handle.__enter__.return_value = mock_image_file_obj tempfile_mock.side_effect = iter([mock_image_file_handle]) deploy_args = {'arg1': 'val1', 'arg2': 'val2'} CONF.irmc.remote_image_share_name = '/remote_image_share_root' copyfile_mock.side_effect = iter([IOError("fake error")]) with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: self.assertRaises(exception.IRMCOperationError, irmc_deploy._prepare_floppy_image, task, deploy_args) create_vfat_image_mock.assert_called_once_with( 'image-tmp-file', parameters=deploy_args) copyfile_mock.assert_called_once_with( 'image-tmp-file', '/remote_image_share_root/' + "image-%s.img" % self.node.uuid) @mock.patch.object(irmc_deploy, '_attach_virtual_cd', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_attach_virtual_fd', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_prepare_floppy_image', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_detach_virtual_fd', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_detach_virtual_cd', spec_set=True, autospec=True) def test_setup_vmedia_for_boot_with_parameters(self, _detach_virtual_cd_mock, _detach_virtual_fd_mock, _prepare_floppy_image_mock, _attach_virtual_fd_mock, _attach_virtual_cd_mock): parameters = {'a': 'b'} iso_filename = 'deploy_iso_or_boot_iso' _prepare_floppy_image_mock.return_value = 'floppy_file_name' 
with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: irmc_deploy.setup_vmedia_for_boot(task, iso_filename, parameters) _detach_virtual_cd_mock.assert_called_once_with(task.node) _detach_virtual_fd_mock.assert_called_once_with(task.node) _prepare_floppy_image_mock.assert_called_once_with(task, parameters) _attach_virtual_fd_mock.assert_called_once_with(task.node, 'floppy_file_name') _attach_virtual_cd_mock.assert_called_once_with(task.node, iso_filename) @mock.patch.object(irmc_deploy, '_attach_virtual_cd', autospec=True) @mock.patch.object(irmc_deploy, '_detach_virtual_fd', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_detach_virtual_cd', spec_set=True, autospec=True) def test_setup_vmedia_for_boot_without_parameters( self, _detach_virtual_cd_mock, _detach_virtual_fd_mock, _attach_virtual_cd_mock): with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: irmc_deploy.setup_vmedia_for_boot(task, 'bootable_iso_filename') _detach_virtual_cd_mock.assert_called_once_with(task.node) _detach_virtual_fd_mock.assert_called_once_with(task.node) _attach_virtual_cd_mock.assert_called_once_with( task.node, 'bootable_iso_filename') @mock.patch.object(irmc_deploy, '_get_deploy_iso_name', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_get_floppy_image_name', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_remove_share_file', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_detach_virtual_fd', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_detach_virtual_cd', spec_set=True, autospec=True) def test__cleanup_vmedia_boot_ok(self, _detach_virtual_cd_mock, _detach_virtual_fd_mock, _remove_share_file_mock, _get_floppy_image_name_mock, _get_deploy_iso_name_mock): with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: irmc_deploy._cleanup_vmedia_boot(task) _detach_virtual_cd_mock.assert_called_once_with(task.node) 
_detach_virtual_fd_mock.assert_called_once_with(task.node) _get_floppy_image_name_mock.assert_called_once_with(task.node) _get_deploy_iso_name_mock.assert_called_once_with(task.node) self.assertTrue(_remove_share_file_mock.call_count, 2) _remove_share_file_mock.assert_has_calls( [mock.call(_get_floppy_image_name_mock(task.node)), mock.call(_get_deploy_iso_name_mock(task.node))]) @mock.patch.object(utils, 'unlink_without_raise', spec_set=True, autospec=True) def test__remove_share_file(self, unlink_without_raise_mock): CONF.irmc.remote_image_share_name = '/' irmc_deploy._remove_share_file("boot.iso") unlink_without_raise_mock.assert_called_once_with('/boot.iso') @mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True, autospec=True) def test__attach_virtual_cd_ok(self, get_irmc_client_mock): irmc_client = get_irmc_client_mock.return_value irmc_deploy.scci.get_virtual_cd_set_params_cmd = ( mock.MagicMock(sepc_set=[])) cd_set_params = (irmc_deploy.scci .get_virtual_cd_set_params_cmd.return_value) CONF.irmc.remote_image_server = '10.20.30.40' CONF.irmc.remote_image_user_domain = 'local' CONF.irmc.remote_image_share_type = 'NFS' CONF.irmc.remote_image_share_name = 'share' CONF.irmc.remote_image_user_name = 'admin' CONF.irmc.remote_image_user_password = 'admin0' irmc_deploy.scci.get_share_type.return_value = 0 with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: irmc_deploy._attach_virtual_cd(task.node, 'iso_filename') get_irmc_client_mock.assert_called_once_with(task.node) (irmc_deploy.scci.get_virtual_cd_set_params_cmd .assert_called_once_with)('10.20.30.40', 'local', 0, 'share', 'iso_filename', 'admin', 'admin0') irmc_client.assert_has_calls( [mock.call(cd_set_params, async=False), mock.call(irmc_deploy.scci.MOUNT_CD, async=False)]) @mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True, autospec=True) def test__attach_virtual_cd_fail(self, get_irmc_client_mock): irmc_client = get_irmc_client_mock.return_value 
irmc_client.side_effect = Exception("fake error") irmc_deploy.scci.SCCIClientError = Exception with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: e = self.assertRaises(exception.IRMCOperationError, irmc_deploy._attach_virtual_cd, task.node, 'iso_filename') get_irmc_client_mock.assert_called_once_with(task.node) self.assertEqual("iRMC Inserting virtual cdrom failed. " + "Reason: fake error", str(e)) @mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True, autospec=True) def test__detach_virtual_cd_ok(self, get_irmc_client_mock): irmc_client = get_irmc_client_mock.return_value with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: irmc_deploy._detach_virtual_cd(task.node) irmc_client.assert_called_once_with(irmc_deploy.scci.UNMOUNT_CD) @mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True, autospec=True) def test__detach_virtual_cd_fail(self, get_irmc_client_mock): irmc_client = get_irmc_client_mock.return_value irmc_client.side_effect = Exception("fake error") irmc_deploy.scci.SCCIClientError = Exception with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: e = self.assertRaises(exception.IRMCOperationError, irmc_deploy._detach_virtual_cd, task.node) self.assertEqual("iRMC Ejecting virtual cdrom failed. 
" + "Reason: fake error", str(e)) @mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True, autospec=True) def test__attach_virtual_fd_ok(self, get_irmc_client_mock): irmc_client = get_irmc_client_mock.return_value irmc_deploy.scci.get_virtual_fd_set_params_cmd = ( mock.MagicMock(sepc_set=[])) fd_set_params = (irmc_deploy.scci .get_virtual_fd_set_params_cmd.return_value) CONF.irmc.remote_image_server = '10.20.30.40' CONF.irmc.remote_image_user_domain = 'local' CONF.irmc.remote_image_share_type = 'NFS' CONF.irmc.remote_image_share_name = 'share' CONF.irmc.remote_image_user_name = 'admin' CONF.irmc.remote_image_user_password = 'admin0' irmc_deploy.scci.get_share_type.return_value = 0 with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: irmc_deploy._attach_virtual_fd(task.node, 'floppy_image_filename') get_irmc_client_mock.assert_called_once_with(task.node) (irmc_deploy.scci.get_virtual_fd_set_params_cmd .assert_called_once_with)('10.20.30.40', 'local', 0, 'share', 'floppy_image_filename', 'admin', 'admin0') irmc_client.assert_has_calls( [mock.call(fd_set_params, async=False), mock.call(irmc_deploy.scci.MOUNT_FD, async=False)]) @mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True, autospec=True) def test__attach_virtual_fd_fail(self, get_irmc_client_mock): irmc_client = get_irmc_client_mock.return_value irmc_client.side_effect = Exception("fake error") irmc_deploy.scci.SCCIClientError = Exception with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: e = self.assertRaises(exception.IRMCOperationError, irmc_deploy._attach_virtual_fd, task.node, 'iso_filename') get_irmc_client_mock.assert_called_once_with(task.node) self.assertEqual("iRMC Inserting virtual floppy failed. 
" + "Reason: fake error", str(e)) @mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True, autospec=True) def test__detach_virtual_fd_ok(self, get_irmc_client_mock): irmc_client = get_irmc_client_mock.return_value with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: irmc_deploy._detach_virtual_fd(task.node) irmc_client.assert_called_once_with(irmc_deploy.scci.UNMOUNT_FD) @mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True, autospec=True) def test__detach_virtual_fd_fail(self, get_irmc_client_mock): irmc_client = get_irmc_client_mock.return_value irmc_client.side_effect = Exception("fake error") irmc_deploy.scci.SCCIClientError = Exception with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: e = self.assertRaises(exception.IRMCOperationError, irmc_deploy._detach_virtual_fd, task.node) self.assertEqual("iRMC Ejecting virtual floppy failed. " "Reason: fake error", str(e)) @mock.patch.object(irmc_deploy, '_parse_config_option', spec_set=True, autospec=True) def test__check_share_fs_mounted_ok(self, parse_conf_mock): # Note(naohirot): mock.patch.stop() and mock.patch.start() don't work. # therefor monkey patching is used to # irmc_deploy._check_share_fs_mounted. # irmc_deploy._check_share_fs_mounted is mocked in # third_party_driver_mocks.py. # irmc_deploy._check_share_fs_mounted_orig is the real function. CONF.irmc.remote_image_share_root = '/' CONF.irmc.remote_image_share_type = 'nfs' result = irmc_deploy._check_share_fs_mounted_orig() parse_conf_mock.assert_called_once_with() self.assertIsNone(result) @mock.patch.object(irmc_deploy, '_parse_config_option', spec_set=True, autospec=True) def test__check_share_fs_mounted_exception(self, parse_conf_mock): # Note(naohirot): mock.patch.stop() and mock.patch.start() don't work. # therefor monkey patching is used to # irmc_deploy._check_share_fs_mounted. # irmc_deploy._check_share_fs_mounted is mocked in # third_party_driver_mocks.py. 
# irmc_deploy._check_share_fs_mounted_orig is the real function. CONF.irmc.remote_image_share_root = '/etc' CONF.irmc.remote_image_share_type = 'cifs' self.assertRaises(exception.IRMCSharedFileSystemNotMounted, irmc_deploy._check_share_fs_mounted_orig) parse_conf_mock.assert_called_once_with() class IRMCVirtualMediaIscsiDeployTestCase(db_base.DbTestCase): def setUp(self): irmc_deploy._check_share_fs_mounted_patcher.start() self.addCleanup(irmc_deploy._check_share_fs_mounted_patcher.stop) super(IRMCVirtualMediaIscsiDeployTestCase, self).setUp() mgr_utils.mock_the_extension_manager(driver="iscsi_irmc") self.node = obj_utils.create_test_node( self.context, driver='iscsi_irmc', driver_info=INFO_DICT) @mock.patch.object(deploy_utils, 'validate_capabilities', spec_set=True, autospec=True) @mock.patch.object(deploy_utils, 'validate_image_properties', spec_set=True, autospec=True) @mock.patch.object(service_utils, 'is_glance_image', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_parse_deploy_info', spec_set=True, autospec=True) @mock.patch.object(iscsi_deploy, 'validate', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_check_share_fs_mounted', spec_set=True, autospec=True) def test_validate_whole_disk_image(self, _check_share_fs_mounted_mock, validate_mock, deploy_info_mock, is_glance_image_mock, validate_prop_mock, validate_capabilities_mock): d_info = {'image_source': '733d1c44-a2ea-414b-aca7-69decf20d810'} deploy_info_mock.return_value = d_info with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: task.node.driver_internal_info = {'is_whole_disk_image': True} task.driver.deploy.validate(task) _check_share_fs_mounted_mock.assert_called_once_with() validate_mock.assert_called_once_with(task) deploy_info_mock.assert_called_once_with(task.node) self.assertFalse(is_glance_image_mock.called) validate_prop_mock.assert_called_once_with(task.context, d_info, []) 
validate_capabilities_mock.assert_called_once_with(task.node) @mock.patch.object(deploy_utils, 'validate_capabilities', spec_set=True, autospec=True) @mock.patch.object(deploy_utils, 'validate_image_properties', spec_set=True, autospec=True) @mock.patch.object(service_utils, 'is_glance_image', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_parse_deploy_info', spec_set=True, autospec=True) @mock.patch.object(iscsi_deploy, 'validate', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_check_share_fs_mounted', spec_set=True, autospec=True) def test_validate_glance_image(self, _check_share_fs_mounted_mock, validate_mock, deploy_info_mock, is_glance_image_mock, validate_prop_mock, validate_capabilities_mock): d_info = {'image_source': '733d1c44-a2ea-414b-aca7-69decf20d810'} deploy_info_mock.return_value = d_info is_glance_image_mock.return_value = True with task_manager.acquire(self.context, self.node.uuid, shared=False) as task: task.driver.deploy.validate(task) _check_share_fs_mounted_mock.assert_called_once_with() validate_mock.assert_called_once_with(task) deploy_info_mock.assert_called_once_with(task.node) validate_prop_mock.assert_called_once_with( task.context, d_info, ['kernel_id', 'ramdisk_id']) validate_capabilities_mock.assert_called_once_with(task.node) @mock.patch.object(deploy_utils, 'validate_capabilities', spec_set=True, autospec=True) @mock.patch.object(deploy_utils, 'validate_image_properties', spec_set=True, autospec=True) @mock.patch.object(service_utils, 'is_glance_image', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_parse_deploy_info', spec_set=True, autospec=True) @mock.patch.object(iscsi_deploy, 'validate', spec_set=True, autospec=True) @mock.patch.object(irmc_deploy, '_check_share_fs_mounted', spec_set=True, autospec=True) def test_validate_non_glance_image(self, _check_share_fs_mounted_mock, validate_mock, deploy_info_mock, is_glance_image_mock, validate_prop_mock, validate_capabilities_mock): 
        # (continuation of test_validate, whose def and mock decorators
        # begin above this chunk)
        d_info = {'image_source': '733d1c44-a2ea-414b-aca7-69decf20d810'}
        deploy_info_mock.return_value = d_info
        is_glance_image_mock.return_value = False
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.deploy.validate(task)
            # validate() must check the share file system, the iSCSI deploy
            # options, the non-Glance image properties and the node
            # capabilities.
            _check_share_fs_mounted_mock.assert_called_once_with()
            validate_mock.assert_called_once_with(task)
            deploy_info_mock.assert_called_once_with(task.node)
            validate_prop_mock.assert_called_once_with(
                task.context, d_info, ['kernel', 'ramdisk'])
            validate_capabilities_mock.assert_called_once_with(task.node)

    # deploy() should power the node off, cache and size-check the instance
    # image, build the ramdisk options (agent URL + BOOTIF), reboot into the
    # deploy ISO and report DEPLOYWAIT.
    # NOTE: mock arguments are in reverse decorator order (bottom-most
    # decorator is the first argument).
    @mock.patch.object(irmc_deploy, '_reboot_into_deploy_iso',
                       spec_set=True, autospec=True)
    @mock.patch.object(deploy_utils, 'get_single_nic_with_vif_port_id',
                       spec_set=True, autospec=True)
    @mock.patch.object(deploy_utils, 'build_agent_options',
                       spec_set=True, autospec=True)
    @mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options',
                       spec_set=True, autospec=True)
    @mock.patch.object(iscsi_deploy, 'check_image_size',
                       spec_set=True, autospec=True)
    @mock.patch.object(iscsi_deploy, 'cache_instance_image',
                       spec_set=True, autospec=True)
    @mock.patch.object(manager_utils, 'node_power_action',
                       spec_set=True, autospec=True)
    def test_deploy(self, node_power_action_mock, cache_instance_image_mock,
                    check_image_size_mock, build_deploy_ramdisk_options_mock,
                    build_agent_options_mock,
                    get_single_nic_with_vif_port_id_mock, _reboot_into_mock):
        deploy_opts = {'a': 'b'}
        build_agent_options_mock.return_value = {
            'ipa-api-url': 'http://1.2.3.4:6385'}
        build_deploy_ramdisk_options_mock.return_value = deploy_opts
        get_single_nic_with_vif_port_id_mock.return_value = '12:34:56:78:90:ab'
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            returned_state = task.driver.deploy.deploy(task)

            node_power_action_mock.assert_called_once_with(
                task, states.POWER_OFF)
            cache_instance_image_mock.assert_called_once_with(
                task.context, task.node)
            check_image_size_mock.assert_called_once_with(task)
            # the ramdisk options must merge the deploy options, the NIC MAC
            # and the agent options built above
            expected_ramdisk_opts = {'a': 'b', 'BOOTIF': '12:34:56:78:90:ab',
                                     'ipa-api-url': 'http://1.2.3.4:6385'}
            build_agent_options_mock.assert_called_once_with(task.node)
            build_deploy_ramdisk_options_mock.assert_called_once_with(
                task.node)
            get_single_nic_with_vif_port_id_mock.assert_called_once_with(
                task)
            _reboot_into_mock.assert_called_once_with(
                task, expected_ramdisk_opts)
            self.assertEqual(states.DEPLOYWAIT, returned_state)

    # tear_down() should remove the node's boot ISO share file, power the
    # node off, clear 'irmc_boot_iso' from driver_internal_info and return
    # DELETED.
    @mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_remove_share_file', spec_set=True,
                       autospec=True)
    def test_tear_down(self, _remove_share_file_mock, node_power_action_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.instance_info['irmc_boot_iso'] = 'glance://deploy_iso'
            task.node.driver_internal_info['irmc_boot_iso'] = 'irmc_boot.iso'
            returned_state = task.driver.deploy.tear_down(task)
            _remove_share_file_mock.assert_called_once_with(
                irmc_deploy._get_boot_iso_name(task.node))
            node_power_action_mock.assert_called_once_with(
                task, states.POWER_OFF)
            self.assertFalse(
                task.node.driver_internal_info.get('irmc_boot_iso'))
            self.assertEqual(states.DELETED, returned_state)

    # clean_up() should tear down the virtual media boot and destroy the
    # cached instance images for the node.
    @mock.patch.object(iscsi_deploy, 'destroy_images', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_cleanup_vmedia_boot', spec_set=True,
                       autospec=True)
    def test_clean_up(self, _cleanup_vmedia_boot_mock, destroy_images_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.deploy.clean_up(task)
            _cleanup_vmedia_boot_mock.assert_called_once_with(task)
            destroy_images_mock.assert_called_once_with(task.node.uuid)


class IRMCVirtualMediaAgentDeployTestCase(db_base.DbTestCase):
    # Tests for the agent-based iRMC virtual media deploy interface
    # (driver "agent_irmc").

    def setUp(self):
        # Patch the share-mount check for the whole test, before the base
        # setUp runs, so driver instantiation does not touch the share.
        irmc_deploy._check_share_fs_mounted_patcher.start()
        self.addCleanup(irmc_deploy._check_share_fs_mounted_patcher.stop)
        super(IRMCVirtualMediaAgentDeployTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver="agent_irmc")
        self.node = obj_utils.create_test_node(
            self.context, driver='agent_irmc', driver_info=INFO_DICT)

    # validate() should parse the driver info and check node capabilities.
    @mock.patch.object(deploy_utils, 'validate_capabilities',
                       spec_set=True, autospec=True)
    @mock.patch.object(irmc_deploy, '_parse_driver_info', spec_set=True,
                       autospec=True)
    def test_validate(self, _parse_driver_info_mock,
                      validate_capabilities_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.deploy.validate(task)
            _parse_driver_info_mock.assert_called_once_with(task.node)
            validate_capabilities_mock.assert_called_once_with(task.node)

    # deploy() should reboot into the deploy ISO using the agent ramdisk
    # options and return DEPLOYWAIT.
    @mock.patch.object(irmc_deploy, '_reboot_into_deploy_iso',
                       spec_set=True, autospec=True)
    @mock.patch.object(deploy_utils, 'build_agent_options', spec_set=True,
                       autospec=True)
    def test_deploy(self, build_agent_options_mock,
                    _reboot_into_deploy_iso_mock):
        deploy_ramdisk_opts = build_agent_options_mock.return_value
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            returned_state = task.driver.deploy.deploy(task)
            build_agent_options_mock.assert_called_once_with(task.node)
            _reboot_into_deploy_iso_mock.assert_called_once_with(
                task, deploy_ramdisk_opts)
            self.assertEqual(states.DEPLOYWAIT, returned_state)

    # tear_down() should power the node off and return DELETED.
    @mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
                       autospec=True)
    def test_tear_down(self, node_power_action_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            returned_state = task.driver.deploy.tear_down(task)
            node_power_action_mock.assert_called_once_with(
                task, states.POWER_OFF)
            self.assertEqual(states.DELETED, returned_state)

    # prepare() should build the instance info for the deploy and persist
    # the node.
    @mock.patch.object(agent, 'build_instance_info_for_deploy', spec_set=True,
                       autospec=True)
    def test_prepare(self, build_instance_info_for_deploy_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            # NOTE(review): 'sepc_set' looks like a typo for 'spec_set'; as
            # written it merely sets a mock attribute and does not restrict
            # the spec — confirm intent before changing, since a real
            # spec_set=[] would alter MagicMock behavior.
            task.node.save = mock.MagicMock(sepc_set=[])
            task.driver.deploy.prepare(task)
            build_instance_info_for_deploy_mock.assert_called_once_with(
                task)
            # (last assertion of test_prepare, whose body begins above)
            task.node.save.assert_called_once_with()

    # clean_up() should only tear down the virtual media boot.
    @mock.patch.object(irmc_deploy, '_cleanup_vmedia_boot', spec_set=True,
                       autospec=True)
    def test_clean_up(self, _cleanup_vmedia_boot_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.deploy.clean_up(task)
            _cleanup_vmedia_boot_mock.assert_called_once_with(task)


class VendorPassthruTestCase(db_base.DbTestCase):
    # Tests for the iRMC vendor passthru methods of the "iscsi_irmc" driver.

    def setUp(self):
        # Patch the share-mount check before base setUp so driver loading
        # does not require a mounted share.
        irmc_deploy._check_share_fs_mounted_patcher.start()
        self.addCleanup(irmc_deploy._check_share_fs_mounted_patcher.stop)
        super(VendorPassthruTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver="iscsi_irmc")
        self.node = obj_utils.create_test_node(
            self.context, driver='iscsi_irmc', driver_info=INFO_DICT)
        # Remote image share configuration used by the iRMC driver.
        CONF.irmc.remote_image_share_root = '/remote_image_share_root'
        CONF.irmc.remote_image_server = '10.20.30.40'
        CONF.irmc.remote_image_share_type = 'NFS'
        CONF.irmc.remote_image_share_name = 'share'
        CONF.irmc.remote_image_user_name = 'admin'
        CONF.irmc.remote_image_user_password = 'admin0'
        CONF.irmc.remote_image_user_domain = 'local'

    # validate(method='pass_deploy_info') should delegate to
    # iscsi_deploy.get_deploy_info, forwarding the extra kwargs.
    @mock.patch.object(iscsi_deploy, 'get_deploy_info', spec_set=True,
                       autospec=True)
    def test_validate_pass_deploy_info(self, get_deploy_info_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.vendor.validate(task, method='pass_deploy_info', a=1)
            get_deploy_info_mock.assert_called_once_with(task.node, a=1)

    # validate(method='pass_bootloader_install_info') should delegate to
    # iscsi_deploy.validate_pass_bootloader_info_input with the kwargs dict.
    @mock.patch.object(iscsi_deploy, 'validate_pass_bootloader_info_input',
                       spec_set=True, autospec=True)
    def test_validate_pass_bootloader_install_info(self, validate_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            kwargs = {'address': '1.2.3.4', 'key': 'fake-key',
                      'status': 'SUCCEEDED', 'error': ''}
            task.driver.vendor.validate(
                task, method='pass_bootloader_install_info', **kwargs)
            validate_mock.assert_called_once_with(task, kwargs)

    # _configure_vmedia_boot() should prepare the boot ISO, attach it as
    # virtual media and set CDROM as the persistent boot device.
    @mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, 'setup_vmedia_for_boot', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_prepare_boot_iso', spec_set=True,
                       autospec=True)
    def test__configure_vmedia_boot(self, _prepare_boot_iso_mock,
                                    setup_vmedia_for_boot_mock,
                                    node_set_boot_device):
        root_uuid_or_disk_id = {'root uuid': 'root_uuid'}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.driver_internal_info['irmc_boot_iso'] = 'boot.iso'
            task.driver.vendor._configure_vmedia_boot(
                task, root_uuid_or_disk_id)
            _prepare_boot_iso_mock.assert_called_once_with(
                task, root_uuid_or_disk_id)
            setup_vmedia_for_boot_mock.assert_called_once_with(
                task, 'boot.iso')
            node_set_boot_device.assert_called_once_with(
                task, boot_devices.CDROM, persistent=True)

    # pass_bootloader_install_info() should validate the ramdisk input and
    # finish the deploy at the given address.
    @mock.patch.object(iscsi_deploy, 'validate_bootloader_install_status',
                       spec_set=True, autospec=True)
    @mock.patch.object(iscsi_deploy, 'finish_deploy', spec_set=True,
                       autospec=True)
    def test_pass_bootloader_install_info(self, finish_deploy_mock,
                                          validate_input_mock):
        kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.vendor.pass_bootloader_install_info(task, **kwargs)
            finish_deploy_mock.assert_called_once_with(task, '123456')
            validate_input_mock.assert_called_once_with(task, kwargs)

    # Happy path for pass_deploy_info() (netboot): clean up the deploy
    # vmedia, continue the iSCSI deploy, prepare and attach the boot ISO,
    # boot from CDROM, notify the ramdisk, and end up ACTIVE.
    @mock.patch.object(deploy_utils, 'set_failed_state', spec_set=True,
                       autospec=True)
    @mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
                       spec_set=True, autospec=True)
    @mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, 'setup_vmedia_for_boot', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_prepare_boot_iso', spec_set=True,
                       autospec=True)
    @mock.patch.object(iscsi_deploy, 'continue_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_cleanup_vmedia_boot', spec_set=True,
                       autospec=True)
    def test_pass_deploy_info_ok(self, _cleanup_vmedia_boot_mock,
                                 continue_deploy_mock,
                                 _prepare_boot_iso_mock,
                                 setup_vmedia_for_boot_mock,
                                 node_set_boot_device_mock,
                                 notify_ramdisk_to_proceed_mock,
                                 set_failed_state_mock):
        kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
        continue_deploy_mock.return_value = {'root uuid': 'root_uuid'}

        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.driver_internal_info['irmc_boot_iso'] = 'irmc_boot.iso'
            task.driver.vendor.pass_deploy_info(task, **kwargs)

            _cleanup_vmedia_boot_mock.assert_called_once_with(task)
            continue_deploy_mock.assert_called_once_with(task, **kwargs)
            _prepare_boot_iso_mock.assert_called_once_with(
                task, 'root_uuid')
            setup_vmedia_for_boot_mock.assert_called_once_with(
                task, 'irmc_boot.iso')
            node_set_boot_device_mock.assert_called_once_with(
                task, boot_devices.CDROM, persistent=True)
            notify_ramdisk_to_proceed_mock.assert_called_once_with(
                '123456')
            self.assertEqual(states.ACTIVE, task.node.provision_state)
            self.assertEqual(states.NOSTATE,
                             task.node.target_provision_state)
            self.assertFalse(set_failed_state_mock.called)

    # pass_deploy_info() called outside DEPLOYWAIT must raise InvalidState
    # and leave the node and all collaborators untouched.
    @mock.patch.object(deploy_utils, 'set_failed_state', spec_set=True,
                       autospec=True)
    @mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
                       spec_set=True, autospec=True)
    @mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, 'setup_vmedia_for_boot', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_prepare_boot_iso', spec_set=True,
                       autospec=True)
    @mock.patch.object(iscsi_deploy, 'continue_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_cleanup_vmedia_boot', spec_set=True,
                       autospec=True)
    def test_pass_deploy_info_fail(self, _cleanup_vmedia_boot_mock,
                                   continue_deploy_mock,
                                   _prepare_boot_iso_mock,
                                   setup_vmedia_for_boot_mock,
                                   node_set_boot_device_mock,
                                   notify_ramdisk_to_proceed_mock,
                                   set_failed_state_mock):
        kwargs = {'method': 'pass_deploy_info', 'address': '123456'}

        # AVAILABLE is not a valid state for pass_deploy_info.
        self.node.provision_state = states.AVAILABLE
        self.node.target_provision_state = states.NOSTATE
        self.node.save()

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.InvalidState,
                              task.driver.vendor.pass_deploy_info,
                              task, **kwargs)
            self.assertEqual(states.AVAILABLE, task.node.provision_state)
            self.assertEqual(states.NOSTATE,
                             task.node.target_provision_state)
            # nothing else may have been touched
            self.assertFalse(_cleanup_vmedia_boot_mock.called)
            self.assertFalse(continue_deploy_mock.called)
            self.assertFalse(_prepare_boot_iso_mock.called)
            self.assertFalse(setup_vmedia_for_boot_mock.called)
            self.assertFalse(node_set_boot_device_mock.called)
            self.assertFalse(notify_ramdisk_to_proceed_mock.called)
            self.assertFalse(set_failed_state_mock.called)

    # If _prepare_boot_iso raises, pass_deploy_info() must move the node to
    # DEPLOYFAIL, power it off, and never attach media or notify the ramdisk.
    @mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
                       autospec=True)
    @mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
                       spec_set=True, autospec=True)
    @mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, 'setup_vmedia_for_boot', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_prepare_boot_iso', spec_set=True,
                       autospec=True)
    @mock.patch.object(iscsi_deploy, 'continue_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_cleanup_vmedia_boot', spec_set=True,
                       autospec=True)
    def test_pass_deploy_info__prepare_boot_exception(
            self, _cleanup_vmedia_boot_mock, continue_deploy_mock,
            _prepare_boot_iso_mock, setup_vmedia_for_boot_mock,
            node_set_boot_device_mock, notify_ramdisk_to_proceed_mock,
            node_power_mock):
        kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
        continue_deploy_mock.return_value = {'root uuid': 'root_uuid'}
        _prepare_boot_iso_mock.side_effect = Exception("fake error")

        self.node.driver_internal_info = {'is_whole_disk_image': False}
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.vendor.pass_deploy_info(task, **kwargs)

            continue_deploy_mock.assert_called_once_with(
                task, method='pass_deploy_info', address='123456')
            _cleanup_vmedia_boot_mock.assert_called_once_with(task)
            _prepare_boot_iso_mock.assert_called_once_with(
                task, 'root_uuid')
            self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
            self.assertEqual(states.ACTIVE,
                             task.node.target_provision_state)
            self.assertFalse(setup_vmedia_for_boot_mock.called)
            self.assertFalse(node_set_boot_device_mock.called)
            self.assertFalse(notify_ramdisk_to_proceed_mock.called)
            node_power_mock.assert_called_once_with(task, states.POWER_OFF)

    # Partition image with boot_option=local: boot from DISK instead of
    # building a boot ISO; the node stays in DEPLOYWAIT until the
    # bootloader-install callback arrives.
    @mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
                       spec_set=True, autospec=True)
    @mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
                       autospec=True)
    @mock.patch.object(iscsi_deploy, 'continue_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_cleanup_vmedia_boot', spec_set=True,
                       autospec=True)
    def test_pass_deploy_info_localboot(self, _cleanup_vmedia_boot_mock,
                                        continue_deploy_mock,
                                        set_boot_device_mock,
                                        notify_ramdisk_to_proceed_mock):
        kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
        continue_deploy_mock.return_value = {'root uuid': '<some-uuid>'}

        self.node.driver_internal_info = {'is_whole_disk_image': False}
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
        self.node.save()

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            vendor = task.driver.vendor
            vendor.pass_deploy_info(task, **kwargs)

            _cleanup_vmedia_boot_mock.assert_called_once_with(task)
            continue_deploy_mock.assert_called_once_with(task, **kwargs)
            set_boot_device_mock.assert_called_once_with(task,
                                                         boot_devices.DISK,
                                                         persistent=True)
            notify_ramdisk_to_proceed_mock.assert_called_once_with('123456')
            self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
            self.assertEqual(states.ACTIVE,
                             task.node.target_provision_state)

    # Whole-disk image: boot from DISK and finish the deploy immediately
    # (no ramdisk notification).
    @mock.patch.object(iscsi_deploy, 'finish_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
                       spec_set=True, autospec=True)
    @mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
                       autospec=True)
    @mock.patch.object(iscsi_deploy, 'continue_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_cleanup_vmedia_boot', spec_set=True,
                       autospec=True)
    def test_pass_deploy_info_whole_disk_image(
            self, _cleanup_vmedia_boot_mock, continue_deploy_mock,
            set_boot_device_mock, notify_ramdisk_to_proceed_mock,
            finish_deploy_mock):
        kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
        continue_deploy_mock.return_value = {'root uuid': '<some-uuid>'}

        self.node.driver_internal_info = {'is_whole_disk_image': True}
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            vendor = task.driver.vendor
            vendor.pass_deploy_info(task, **kwargs)

            _cleanup_vmedia_boot_mock.assert_called_once_with(task)
            continue_deploy_mock.assert_called_once_with(task, **kwargs)
            set_boot_device_mock.assert_called_once_with(task,
                                                         boot_devices.DISK,
                                                         persistent=True)
            self.assertFalse(notify_ramdisk_to_proceed_mock.called)
            finish_deploy_mock.assert_called_once_with(task, '123456')

    # Whole-disk image with boot_option=local behaves the same as the
    # netboot whole-disk case: DISK boot and immediate finish_deploy.
    @mock.patch.object(iscsi_deploy, 'finish_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
                       spec_set=True, autospec=True)
    @mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
                       autospec=True)
    @mock.patch.object(iscsi_deploy, 'continue_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_cleanup_vmedia_boot', spec_set=True,
                       autospec=True)
    def test_pass_deploy_info_whole_disk_image_local(
            self, _cleanup_vmedia_boot_mock, continue_deploy_mock,
            set_boot_device_mock, notify_ramdisk_to_proceed_mock,
            finish_deploy_mock):
        kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
        continue_deploy_mock.return_value = {'root uuid': '<some-uuid>'}

        self.node.driver_internal_info = {'is_whole_disk_image': True}
        self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            vendor = task.driver.vendor
            vendor.pass_deploy_info(task, **kwargs)

            _cleanup_vmedia_boot_mock.assert_called_once_with(task)
            continue_deploy_mock.assert_called_once_with(task, **kwargs)
            set_boot_device_mock.assert_called_once_with(task,
                                                         boot_devices.DISK,
                                                         persistent=True)
            self.assertFalse(notify_ramdisk_to_proceed_mock.called)
            finish_deploy_mock.assert_called_once_with(task, '123456')

    # Agent continue_deploy, netboot path: iSCSI-deploy the image, then
    # configure virtual media boot with the returned root uuid and reboot.
    @mock.patch.object(agent_base_vendor.BaseAgentVendor,
                       'reboot_and_finish_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy.VendorPassthru, '_configure_vmedia_boot',
                       autospec=True)
    @mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_cleanup_vmedia_boot', spec_set=True,
                       autospec=True)
    def test_continue_deploy_netboot(self, _cleanup_vmedia_boot_mock,
                                     do_agent_iscsi_deploy_mock,
                                     _configure_vmedia_boot_mock,
                                     reboot_and_finish_deploy_mock):
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()
        do_agent_iscsi_deploy_mock.return_value = {
            'root uuid': 'some-root-uuid'}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.vendor.continue_deploy(task)
            _cleanup_vmedia_boot_mock.assert_called_once_with(task)
            do_agent_iscsi_deploy_mock.assert_called_once_with(task,
                                                               mock.ANY)
            _configure_vmedia_boot_mock.assert_called_once_with(
                mock.ANY, task, 'some-root-uuid')
            reboot_and_finish_deploy_mock.assert_called_once_with(
                task.driver.vendor, task)

    # Agent continue_deploy with boot_option=local: install a local
    # bootloader for the root uuid instead of vmedia boot.
    @mock.patch.object(agent_base_vendor.BaseAgentVendor,
                       'reboot_and_finish_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(agent_base_vendor.BaseAgentVendor,
                       'configure_local_boot', spec_set=True, autospec=True)
    @mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_cleanup_vmedia_boot', spec_set=True,
                       autospec=True)
    def test_continue_deploy_localboot(self, _cleanup_vmedia_boot_mock,
                                       do_agent_iscsi_deploy_mock,
                                       configure_local_boot_mock,
                                       reboot_and_finish_deploy_mock):
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.instance_info = {
            'capabilities': {'boot_option': 'local'}}
        self.node.save()
        do_agent_iscsi_deploy_mock.return_value = {
            'root uuid': 'some-root-uuid'}

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.vendor.continue_deploy(task)
            _cleanup_vmedia_boot_mock.assert_called_once_with(task)
            do_agent_iscsi_deploy_mock.assert_called_once_with(task,
                                                               mock.ANY)
            configure_local_boot_mock.assert_called_once_with(
                mock.ANY, task, root_uuid='some-root-uuid',
                efi_system_part_uuid=None)
            reboot_and_finish_deploy_mock.assert_called_once_with(
                mock.ANY, task)

    # Whole-disk image: no root uuid is passed to configure_local_boot.
    @mock.patch.object(agent_base_vendor.BaseAgentVendor,
                       'reboot_and_finish_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(agent_base_vendor.BaseAgentVendor,
                       'configure_local_boot', spec_set=True, autospec=True)
    @mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(irmc_deploy, '_cleanup_vmedia_boot', spec_set=True,
                       autospec=True)
    def test_continue_deploy_whole_disk_image(self,
                                              _cleanup_vmedia_boot_mock,
                                              do_agent_iscsi_deploy_mock,
                                              configure_local_boot_mock,
                                              reboot_and_finish_deploy_mock):
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.driver_internal_info = {'is_whole_disk_image': True}
        self.node.save()
        do_agent_iscsi_deploy_mock.return_value = {
            'disk identifier': 'some-disk-id'}

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.vendor.continue_deploy(task)
            _cleanup_vmedia_boot_mock.assert_called_once_with(task)
            do_agent_iscsi_deploy_mock.assert_called_once_with(task,
                                                               mock.ANY)
            configure_local_boot_mock.assert_called_once_with(
                mock.ANY, task, root_uuid=None, efi_system_part_uuid=None)
            reboot_and_finish_deploy_mock.assert_called_once_with(
                mock.ANY, task)

    # UEFI local boot: the EFI system partition uuid is forwarded as well.
    @mock.patch.object(agent_base_vendor.BaseAgentVendor,
                       'reboot_and_finish_deploy', spec_set=True,
                       autospec=True)
    @mock.patch.object(agent_base_vendor.BaseAgentVendor,
                       'configure_local_boot', spec_set=True, autospec=True)
    @mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', spec_set=True,
                       autospec=True)
    # NOTE(review): unlike the sibling tests, this patch lacks
    # spec_set=True — presumably an oversight; confirm before changing.
    @mock.patch.object(irmc_deploy, '_cleanup_vmedia_boot', autospec=True)
    def test_continue_deploy_localboot_uefi(self, _cleanup_vmedia_boot_mock,
                                            do_agent_iscsi_deploy_mock,
                                            configure_local_boot_mock,
                                            reboot_and_finish_deploy_mock):
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.instance_info = {
            'capabilities': {'boot_option': 'local'}}
        self.node.save()
        do_agent_iscsi_deploy_mock.return_value = {
            'root uuid': 'some-root-uuid',
            'efi system partition uuid': 'efi-system-part-uuid'}

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.vendor.continue_deploy(task)
            _cleanup_vmedia_boot_mock.assert_called_once_with(task)
            do_agent_iscsi_deploy_mock.assert_called_once_with(task,
                                                               mock.ANY)
            configure_local_boot_mock.assert_called_once_with(
                mock.ANY, task, root_uuid='some-root-uuid',
                efi_system_part_uuid='efi-system-part-uuid')
            reboot_and_finish_deploy_mock.assert_called_once_with(
                mock.ANY, task)


class IRMCVirtualMediaAgentVendorInterfaceTestCase(db_base.DbTestCase):
    # Tests for the agent vendor interface of the "agent_irmc" driver.

    def setUp(self):
        # Patch the share-mount check before base setUp so driver loading
        # does not require a mounted share.
        irmc_deploy._check_share_fs_mounted_patcher.start()
        self.addCleanup(irmc_deploy._check_share_fs_mounted_patcher.stop)
        super(IRMCVirtualMediaAgentVendorInterfaceTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver="agent_irmc")
        self.node = obj_utils.create_test_node(
            self.context, driver='agent_irmc', driver_info=INFO_DICT)

    # reboot_to_instance() should clean up the deploy virtual media first,
    # then delegate to the base agent implementation.
    @mock.patch.object(agent.AgentVendorInterface, 'reboot_to_instance',
                       spec_set=True, autospec=True)
    # NOTE(review): spec_set=True is missing here too — confirm intent.
    @mock.patch.object(irmc_deploy, '_cleanup_vmedia_boot', autospec=True)
    def test_reboot_to_instance(self, _cleanup_vmedia_boot_mock,
                                agent_reboot_to_instance_mock):
        kwargs = {}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.vendor.reboot_to_instance(task, **kwargs)

            _cleanup_vmedia_boot_mock.assert_called_once_with(task)
            agent_reboot_to_instance_mock.assert_called_once_with(
                mock.ANY, task, **kwargs)
r""" Composition Statistics (:mod:`skbio.stats.composition`) ======================================================= .. currentmodule:: skbio.stats.composition This module provides functions for compositional data analysis. Many 'omics datasets are inherently compositional - meaning that they are best interpreted as proportions or percentages rather than absolute counts. Formally, :math:`x` is a composition if :math:`\sum_{i=0}^D x_{i} = c` and :math:`x_{i} > 0`, :math:`1 \leq i \leq D` and :math:`c` is a real valued constant and there are :math:`D` components for each composition. In this module :math:`c=1`. Compositional data can be analyzed using Aitchison geometry. [1]_ However, in this framework, standard real Euclidean operations such as addition and multiplication no longer apply. Only operations such as perturbation and power can be used to manipulate this data. This module allows two styles of manipulation of compositional data. Compositional data can be analyzed using perturbation and power operations, which can be useful for simulation studies. The alternative strategy is to transform compositional data into the real space. Right now, the centre log ratio transform (clr) and the isometric log ratio transform (ilr) [2]_ can be used to accomplish this. This transform can be useful for performing standard statistical tools such as parametric hypothesis testing, regressions and more. The major caveat of using this framework is dealing with zeros. In the Aitchison geometry, only compositions with nonzero components can be considered. The multiplicative replacement technique [3]_ can be used to substitute these zeros with small pseudocounts without introducing major distortions to the data. Functions --------- .. autosummary:: :toctree: generated/ closure multiplicative_replacement perturb perturb_inv power inner clr clr_inv ilr ilr_inv centralize ancom References ---------- .. [1] V. Pawlowsky-Glahn, "Lecture Notes on Compositional Data Analysis" (2007) .. 
[2] J. J. Egozcue., "Isometric Logratio Transformations for Compositional Data Analysis" Mathematical Geology, 35.3 (2003) .. [3] J. A. Martin-Fernandez, "Dealing With Zeros and Missing Values in Compositional Data Sets Using Nonparametric Imputation", Mathematical Geology, 35.3 (2003) Examples -------- >>> import numpy as np Consider a very simple environment with only 3 species. The species in the environment are equally distributed and their proportions are equivalent: >>> otus = np.array([1./3, 1./3., 1./3]) Suppose that an antibiotic kills off half of the population for the first two species, but doesn't harm the third species. Then the perturbation vector would be as follows >>> antibiotic = np.array([1./2, 1./2, 1]) And the resulting perturbation would be >>> perturb(otus, antibiotic) array([ 0.25, 0.25, 0.5 ]) """ # ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- import numpy as np import pandas as pd import scipy.stats import skbio.util from skbio.util._decorator import experimental @experimental(as_of="0.4.0") def closure(mat): """ Performs closure to ensure that all elements add up to 1. Parameters ---------- mat : array_like a matrix of proportions where rows = compositions columns = components Returns ------- array_like, np.float64 A matrix of proportions where all of the values are nonzero and each composition (row) adds up to 1 Raises ------ ValueError Raises an error if any values are negative. ValueError Raises an error if the matrix has more than 2 dimension. ValueError Raises an error if there is a row that has all zeros. 
Examples -------- >>> import numpy as np >>> from skbio.stats.composition import closure >>> X = np.array([[2, 2, 6], [4, 4, 2]]) >>> closure(X) array([[ 0.2, 0.2, 0.6], [ 0.4, 0.4, 0.2]]) """ mat = np.atleast_2d(mat) if np.any(mat < 0): raise ValueError("Cannot have negative proportions") if mat.ndim > 2: raise ValueError("Input matrix can only have two dimensions or less") if np.all(mat == 0, axis=1).sum() > 0: raise ValueError("Input matrix cannot have rows with all zeros") mat = mat / mat.sum(axis=1, keepdims=True) return mat.squeeze() @experimental(as_of="0.4.0") def multiplicative_replacement(mat, delta=None): r"""Replace all zeros with small non-zero values It uses the multiplicative replacement strategy [1]_ , replacing zeros with a small positive :math:`\delta` and ensuring that the compositions still add up to 1. Parameters ---------- mat: array_like a matrix of proportions where rows = compositions and columns = components delta: float, optional a small number to be used to replace zeros If delta is not specified, then the default delta is :math:`\delta = \frac{1}{N^2}` where :math:`N` is the number of components Returns ------- numpy.ndarray, np.float64 A matrix of proportions where all of the values are nonzero and each composition (row) adds up to 1 Raises ------ ValueError Raises an error if negative proportions are created due to a large `delta`. Notes ----- This method will result in negative proportions if a large delta is chosen. References ---------- .. [1] J. A. Martin-Fernandez. 
"Dealing With Zeros and Missing Values in Compositional Data Sets Using Nonparametric Imputation" Examples -------- >>> import numpy as np >>> from skbio.stats.composition import multiplicative_replacement >>> X = np.array([[.2,.4,.4, 0],[0,.5,.5,0]]) >>> multiplicative_replacement(X) array([[ 0.1875, 0.375 , 0.375 , 0.0625], [ 0.0625, 0.4375, 0.4375, 0.0625]]) """ mat = closure(mat) z_mat = (mat == 0) num_feats = mat.shape[-1] tot = z_mat.sum(axis=-1, keepdims=True) if delta is None: delta = (1. / num_feats)**2 zcnts = 1 - tot * delta if np.any(zcnts) < 0: raise ValueError('The multiplicative replacment created negative ' 'proportions. Consider using a smaller `delta`.') mat = np.where(z_mat, delta, zcnts * mat) return mat.squeeze() @experimental(as_of="0.4.0") def perturb(x, y): r""" Performs the perturbation operation. This operation is defined as .. math:: x \oplus y = C[x_1 y_1, \ldots, x_D y_D] :math:`C[x]` is the closure operation defined as .. math:: C[x] = \left[\frac{x_1}{\sum_{i=1}^{D} x_i},\ldots, \frac{x_D}{\sum_{i=1}^{D} x_i} \right] for some :math:`D` dimensional real vector :math:`x` and :math:`D` is the number of components for every composition. Parameters ---------- x : array_like, float a matrix of proportions where rows = compositions and columns = components y : array_like, float a matrix of proportions where rows = compositions and columns = components Returns ------- numpy.ndarray, np.float64 A matrix of proportions where all of the values are nonzero and each composition (row) adds up to 1 Examples -------- >>> import numpy as np >>> from skbio.stats.composition import perturb >>> x = np.array([.1,.3,.4, .2]) >>> y = np.array([1./6,1./6,1./3,1./3]) >>> perturb(x,y) array([ 0.0625, 0.1875, 0.5 , 0.25 ]) """ x, y = closure(x), closure(y) return closure(x * y) @experimental(as_of="0.4.0") def perturb_inv(x, y): r""" Performs the inverse perturbation operation. This operation is defined as .. 
math:: x \ominus y = C[x_1 y_1^{-1}, \ldots, x_D y_D^{-1}] :math:`C[x]` is the closure operation defined as .. math:: C[x] = \left[\frac{x_1}{\sum_{i=1}^{D} x_i},\ldots, \frac{x_D}{\sum_{i=1}^{D} x_i} \right] for some :math:`D` dimensional real vector :math:`x` and :math:`D` is the number of components for every composition. Parameters ---------- x : array_like a matrix of proportions where rows = compositions and columns = components y : array_like a matrix of proportions where rows = compositions and columns = components Returns ------- numpy.ndarray, np.float64 A matrix of proportions where all of the values are nonzero and each composition (row) adds up to 1 Examples -------- >>> import numpy as np >>> from skbio.stats.composition import perturb_inv >>> x = np.array([.1,.3,.4, .2]) >>> y = np.array([1./6,1./6,1./3,1./3]) >>> perturb_inv(x,y) array([ 0.14285714, 0.42857143, 0.28571429, 0.14285714]) """ x, y = closure(x), closure(y) return closure(x / y) @experimental(as_of="0.4.0") def power(x, a): r""" Performs the power operation. This operation is defined as follows .. math:: `x \odot a = C[x_1^a, \ldots, x_D^a] :math:`C[x]` is the closure operation defined as .. math:: C[x] = \left[\frac{x_1}{\sum_{i=1}^{D} x_i},\ldots, \frac{x_D}{\sum_{i=1}^{D} x_i} \right] for some :math:`D` dimensional real vector :math:`x` and :math:`D` is the number of components for every composition. 
Parameters ---------- x : array_like, float a matrix of proportions where rows = compositions and columns = components a : float a scalar float Returns ------- numpy.ndarray, np.float64 A matrix of proportions where all of the values are nonzero and each composition (row) adds up to 1 Examples -------- >>> import numpy as np >>> from skbio.stats.composition import power >>> x = np.array([.1,.3,.4, .2]) >>> power(x, .1) array([ 0.23059566, 0.25737316, 0.26488486, 0.24714631]) """ x = closure(x) return closure(x**a).squeeze() @experimental(as_of="0.4.0") def inner(x, y): r""" Calculates the Aitchson inner product. This inner product is defined as follows .. math:: \langle x, y \rangle_a = \frac{1}{2D} \sum\limits_{i=1}^{D} \sum\limits_{j=1}^{D} \ln\left(\frac{x_i}{x_j}\right) \ln\left(\frac{y_i}{y_j}\right) Parameters ---------- x : array_like a matrix of proportions where rows = compositions and columns = components y : array_like a matrix of proportions where rows = compositions and columns = components Returns ------- numpy.ndarray inner product result Examples -------- >>> import numpy as np >>> from skbio.stats.composition import inner >>> x = np.array([.1, .3, .4, .2]) >>> y = np.array([.2, .4, .2, .2]) >>> inner(x, y) # doctest: +ELLIPSIS 0.2107852473... """ x = closure(x) y = closure(y) a, b = clr(x), clr(y) return a.dot(b.T) @experimental(as_of="0.4.0") def clr(mat): r""" Performs centre log ratio transformation. This function transforms compositions from Aitchison geometry to the real space. The :math:`clr` transform is both an isometry and an isomorphism defined on the following spaces :math:`clr: S^D \rightarrow U` where :math:`U= \{x :\sum\limits_{i=1}^D x = 0 \; \forall x \in \mathbb{R}^D\}` It is defined for a composition :math:`x` as follows: .. math:: clr(x) = \ln\left[\frac{x_1}{g_m(x)}, \ldots, \frac{x_D}{g_m(x)}\right] where :math:`g_m(x) = (\prod\limits_{i=1}^{D} x_i)^{1/D}` is the geometric mean of :math:`x`. 
    Parameters
    ----------
    mat : array_like, float
       a matrix of proportions where
       rows = compositions and
       columns = components

    Returns
    -------
    numpy.ndarray
         clr transformed matrix

    Examples
    --------
    >>> import numpy as np
    >>> from skbio.stats.composition import clr
    >>> x = np.array([.1, .3, .4, .2])
    >>> clr(x)
    array([-0.79451346,  0.30409883,  0.5917809 , -0.10136628])

    """
    mat = closure(mat)
    lmat = np.log(mat)
    # subtract the row-wise mean of the logs, i.e. the log of the
    # geometric mean; keepdims=True keeps broadcasting valid for 2-D input
    gm = lmat.mean(axis=-1, keepdims=True)
    return (lmat - gm).squeeze()


@experimental(as_of="0.4.0")
def clr_inv(mat):
    r"""
    Performs inverse centre log ratio transformation.

    This function transforms compositions from the real space to
    Aitchison geometry. The :math:`clr^{-1}` transform is both an isometry,
    and an isomorphism defined on the following spaces

    :math:`clr^{-1}: U \rightarrow S^D`

    where :math:`U=
    \{x :\sum\limits_{i=1}^D x = 0 \; \forall x \in \mathbb{R}^D\}`

    This transformation is defined as follows

    .. math::
        clr^{-1}(x) = C[\exp( x_1, \ldots, x_D)]

    Parameters
    ----------
    mat : array_like, float
       a matrix of real values where
       rows = transformed compositions and
       columns = components

    Returns
    -------
    numpy.ndarray
         inverse clr transformed matrix

    Examples
    --------
    >>> import numpy as np
    >>> from skbio.stats.composition import clr_inv
    >>> x = np.array([.1, .3, .4, .2])
    >>> clr_inv(x)
    array([ 0.21383822,  0.26118259,  0.28865141,  0.23632778])

    """
    return closure(np.exp(mat))


@experimental(as_of="0.4.0")
def ilr(mat, basis=None, check=True):
    r"""
    Performs isometric log ratio transformation.

    This function transforms compositions from Aitchison simplex to
    the real space. The :math:`ilr` transform is both an isometry,
    and an isomorphism defined on the following spaces

    :math:`ilr: S^D \rightarrow \mathbb{R}^{D-1}`

    The ilr transformation is defined as follows

    .. math::
        ilr(x) =
        [\langle x, e_1 \rangle_a, \ldots, \langle x, e_{D-1} \rangle_a]

    where :math:`[e_1,\ldots,e_{D-1}]` is an orthonormal basis in the simplex.

    If an orthonormal basis isn't specified, the J. J. Egozcue orthonormal
    basis derived from Gram-Schmidt orthogonalization will be used by
    default.

    Parameters
    ----------
    mat: numpy.ndarray
       a matrix of proportions where
       rows = compositions and
       columns = components
    basis: numpy.ndarray, float, optional
        orthonormal basis for Aitchison simplex
        defaults to J.J.Egozcue orthonormal basis.
    check: bool
        Specifies if the basis is orthonormal.

    Examples
    --------
    >>> import numpy as np
    >>> from skbio.stats.composition import ilr
    >>> x = np.array([.1, .3, .4, .2])
    >>> ilr(x)
    array([-0.7768362 , -0.68339802,  0.11704769])

    Notes
    -----
    If the `basis` parameter is specified, it is expected to be a basis in
    the Aitchison simplex.  If there are `D-1` elements specified in `mat`,
    then the dimensions of the basis needs be `D-1 x D`, where rows represent
    basis vectors, and the columns represent proportions.
    """
    mat = closure(mat)
    if basis is None:
        # default basis lives in the simplex (clr_inv of the
        # Gram-Schmidt clr-space basis)
        basis = clr_inv(_gram_schmidt_basis(mat.shape[-1]))
    else:
        if len(basis.shape) != 2:
            raise ValueError("Basis needs to be a 2D matrix, "
                             "not a %dD matrix." %
                             (len(basis.shape)))
        if check:
            _check_orthogonality(basis)

    return inner(mat, basis)


@experimental(as_of="0.4.0")
def ilr_inv(mat, basis=None, check=True):
    r"""
    Performs inverse isometric log ratio transform.

    This function transforms compositions from the real space to
    Aitchison geometry. The :math:`ilr^{-1}` transform is both an isometry,
    and an isomorphism defined on the following spaces

    :math:`ilr^{-1}: \mathbb{R}^{D-1} \rightarrow S^D`

    The inverse ilr transformation is defined as follows

    .. math::
        ilr^{-1}(x) = \bigoplus\limits_{i=1}^{D-1} x \odot e_i

    where :math:`[e_1,\ldots, e_{D-1}]` is an orthonormal basis in the
    simplex.

    If an orthonormal basis isn't specified, the J. J. Egozcue orthonormal
    basis derived from Gram-Schmidt orthogonalization will be used by
    default.

    Parameters
    ----------
    mat: numpy.ndarray, float
       a matrix of transformed proportions where
       rows = compositions and
       columns = components
    basis: numpy.ndarray, float, optional
        orthonormal basis for Aitchison simplex
        defaults to J.J.Egozcue orthonormal basis
    check: bool
        Specifies if the basis is orthonormal.

    Examples
    --------
    >>> import numpy as np
    >>> from skbio.stats.composition import ilr_inv
    >>> x = np.array([.1, .3, .6,])
    >>> ilr_inv(x)
    array([ 0.34180297,  0.29672718,  0.22054469,  0.14092516])

    Notes
    -----
    If the `basis` parameter is specified, it is expected to be a basis in
    the Aitchison simplex.  If there are `D-1` elements specified in `mat`,
    then the dimensions of the basis needs be `D-1 x D`, where rows represent
    basis vectors, and the columns represent proportions.
    """
    if basis is None:
        # D-1 ilr coordinates map back into a D-part composition
        basis = _gram_schmidt_basis(mat.shape[-1] + 1)
    else:
        if len(basis.shape) != 2:
            raise ValueError("Basis needs to be a 2D matrix, "
                             "not a %dD matrix." %
                             (len(basis.shape)))
        if check:
            _check_orthogonality(basis)
        # this is necessary, since the clr function
        # performs np.squeeze()
        basis = np.atleast_2d(clr(basis))

    return clr_inv(np.dot(mat, basis))


@experimental(as_of="0.4.0")
def centralize(mat):
    r"""Center data around its geometric average.
Parameters ---------- mat : array_like, float a matrix of proportions where rows = compositions and columns = components Returns ------- numpy.ndarray centered composition matrix Examples -------- >>> import numpy as np >>> from skbio.stats.composition import centralize >>> X = np.array([[.1,.3,.4, .2],[.2,.2,.2,.4]]) >>> centralize(X) array([[ 0.17445763, 0.30216948, 0.34891526, 0.17445763], [ 0.32495488, 0.18761279, 0.16247744, 0.32495488]]) """ mat = closure(mat) cen = scipy.stats.gmean(mat, axis=0) return perturb_inv(mat, cen) @experimental(as_of="0.4.1") def ancom(table, grouping, alpha=0.05, tau=0.02, theta=0.1, multiple_comparisons_correction='holm-bonferroni', significance_test=None, percentiles=(0.0, 25.0, 50.0, 75.0, 100.0)): r""" Performs a differential abundance test using ANCOM. This is done by calculating pairwise log ratios between all features and performing a significance test to determine if there is a significant difference in feature ratios with respect to the variable of interest. In an experiment with only two treatments, this tests the following hypothesis for feature :math:`i` .. math:: H_{0i}: \mathbb{E}[\ln(u_i^{(1)})] = \mathbb{E}[\ln(u_i^{(2)})] where :math:`u_i^{(1)}` is the mean abundance for feature :math:`i` in the first group and :math:`u_i^{(2)}` is the mean abundance for feature :math:`i` in the second group. Parameters ---------- table : pd.DataFrame A 2D matrix of strictly positive values (i.e. counts or proportions) where the rows correspond to samples and the columns correspond to features. grouping : pd.Series Vector indicating the assignment of samples to groups. For example, these could be strings or integers denoting which group a sample belongs to. It must be the same length as the samples in `table`. The index must be the same on `table` and `grouping` but need not be in the same order. alpha : float, optional Significance level for each of the statistical tests. This can can be anywhere between 0 and 1 exclusive. 
tau : float, optional A constant used to determine an appropriate cutoff. A value close to zero indicates a conservative cutoff. This can can be anywhere between 0 and 1 exclusive. theta : float, optional Lower bound for the proportion for the W-statistic. If all W-statistics are lower than theta, then no features will be detected to be differentially significant. This can can be anywhere between 0 and 1 exclusive. multiple_comparisons_correction : {None, 'holm-bonferroni'}, optional The multiple comparison correction procedure to run. If None, then no multiple comparison correction procedure will be run. If 'holm-boniferroni' is specified, then the Holm-Boniferroni procedure [1]_ will be run. significance_test : function, optional A statistical significance function to test for significance between classes. This function must be able to accept at least two 1D array_like arguments of floats and returns a test statistic and a p-value. By default ``scipy.stats.f_oneway`` is used. percentiles : iterable of floats, optional Percentile abundances to return for each feature in each group. By default, will return the minimum, 25th percentile, median, 75th percentile, and maximum abundances for each feature in each group. Returns ------- pd.DataFrame A table of features, their W-statistics and whether the null hypothesis is rejected. `"W"` is the W-statistic, or number of features that a single feature is tested to be significantly different against. `"Reject null hypothesis"` indicates if feature is differentially abundant across groups (`True`) or not (`False`). pd.DataFrame A table of features and their percentile abundances in each group. If ``percentiles`` is empty, this will be an empty ``pd.DataFrame``. The rows in this object will be features, and the columns will be a multi-index where the first index is the percentile, and the second index is the group. 
See Also -------- multiplicative_replacement scipy.stats.ttest_ind scipy.stats.f_oneway scipy.stats.wilcoxon scipy.stats.kruskal Notes ----- The developers of this method recommend the following significance tests ([2]_, Supplementary File 1, top of page 11): if there are 2 groups, use the standard parametric t-test (``scipy.stats.ttest_ind``) or non-parametric Wilcoxon rank sum test (``scipy.stats.wilcoxon``). If there are more than 2 groups, use parametric one-way ANOVA (``scipy.stats.f_oneway``) or nonparametric Kruskal-Wallis (``scipy.stats.kruskal``). Because one-way ANOVA is equivalent to the standard t-test when the number of groups is two, we default to ``scipy.stats.f_oneway`` here, which can be used when there are two or more groups. Users should refer to the documentation of these tests in SciPy to understand the assumptions made by each test. This method cannot handle any zero counts as input, since the logarithm of zero cannot be computed. While this is an unsolved problem, many studies, including [2]_, have shown promising results by adding pseudocounts to all values in the matrix. In [2]_, a pseudocount of 0.001 was used, though the authors note that a pseudocount of 1.0 may also be useful. Zero counts can also be addressed using the ``multiplicative_replacement`` method. References ---------- .. [1] Holm, S. "A simple sequentially rejective multiple test procedure". Scandinavian Journal of Statistics (1979), 6. .. [2] Mandal et al. "Analysis of composition of microbiomes: a novel method for studying microbial composition", Microbial Ecology in Health & Disease, (2015), 26. Examples -------- First import all of the necessary modules: >>> from skbio.stats.composition import ancom >>> import pandas as pd Now let's load in a DataFrame with 6 samples and 7 features (e.g., these may be bacterial OTUs): >>> table = pd.DataFrame([[12, 11, 10, 10, 10, 10, 10], ... [9, 11, 12, 10, 10, 10, 10], ... [1, 11, 10, 11, 10, 5, 9], ... 
[22, 21, 9, 10, 10, 10, 10], ... [20, 22, 10, 10, 13, 10, 10], ... [23, 21, 14, 10, 10, 10, 10]], ... index=['s1', 's2', 's3', 's4', 's5', 's6'], ... columns=['b1', 'b2', 'b3', 'b4', 'b5', 'b6', ... 'b7']) Then create a grouping vector. In this example, there is a treatment group and a placebo group. >>> grouping = pd.Series(['treatment', 'treatment', 'treatment', ... 'placebo', 'placebo', 'placebo'], ... index=['s1', 's2', 's3', 's4', 's5', 's6']) Now run ``ancom`` to determine if there are any features that are significantly different in abundance between the treatment and the placebo groups. The first DataFrame that is returned contains the ANCOM test results, and the second contains the percentile abundance data for each feature in each group. >>> ancom_df, percentile_df = ancom(table, grouping) >>> ancom_df['W'] b1 0 b2 4 b3 0 b4 1 b5 1 b6 0 b7 1 Name: W, dtype: int64 The W-statistic is the number of features that a single feature is tested to be significantly different against. In this scenario, `b2` was detected to have significantly different abundances compared to four of the other features. To summarize the results from the W-statistic, let's take a look at the results from the hypothesis test. The `Reject null hypothesis` column in the table indicates whether the null hypothesis was rejected, and that a feature was therefore observed to be differentially abundant across the groups. >>> ancom_df['Reject null hypothesis'] b1 False b2 True b3 False b4 False b5 False b6 False b7 False Name: Reject null hypothesis, dtype: bool From this we can conclude that only `b2` was significantly different in abundance between the treatment and the placebo. We still don't know, for example, in which group `b2` was more abundant. We therefore may next be interested in comparing the abundance of `b2` across the two groups. We can do that using the second DataFrame that was returned. 
Here we compare the median (50th percentile) abundance of `b2` in the treatment and placebo groups: >>> percentile_df[50.0].loc['b2'] Group placebo 21.0 treatment 11.0 Name: b2, dtype: float64 We can also look at a full five-number summary for ``b2`` in the treatment and placebo groups: >>> percentile_df.loc['b2'] # doctest: +NORMALIZE_WHITESPACE Percentile Group 0.0 placebo 21.0 25.0 placebo 21.0 50.0 placebo 21.0 75.0 placebo 21.5 100.0 placebo 22.0 0.0 treatment 11.0 25.0 treatment 11.0 50.0 treatment 11.0 75.0 treatment 11.0 100.0 treatment 11.0 Name: b2, dtype: float64 Taken together, these data tell us that `b2` is present in significantly higher abundance in the placebo group samples than in the treatment group samples. """ if not isinstance(table, pd.DataFrame): raise TypeError('`table` must be a `pd.DataFrame`, ' 'not %r.' % type(table).__name__) if not isinstance(grouping, pd.Series): raise TypeError('`grouping` must be a `pd.Series`,' ' not %r.' % type(grouping).__name__) if np.any(table <= 0): raise ValueError('Cannot handle zeros or negative values in `table`. ' 'Use pseudocounts or ``multiplicative_replacement``.' ) if not 0 < alpha < 1: raise ValueError('`alpha`=%f is not within 0 and 1.' % alpha) if not 0 < tau < 1: raise ValueError('`tau`=%f is not within 0 and 1.' % tau) if not 0 < theta < 1: raise ValueError('`theta`=%f is not within 0 and 1.' % theta) if multiple_comparisons_correction is not None: if multiple_comparisons_correction != 'holm-bonferroni': raise ValueError('%r is not an available option for ' '`multiple_comparisons_correction`.' % multiple_comparisons_correction) if (grouping.isnull()).any(): raise ValueError('Cannot handle missing values in `grouping`.') if (table.isnull()).any().any(): raise ValueError('Cannot handle missing values in `table`.') percentiles = list(percentiles) for percentile in percentiles: if not 0.0 <= percentile <= 100.0: raise ValueError('Percentiles must be in the range [0, 100], %r ' 'was provided.' 
% percentile) duplicates = skbio.util.find_duplicates(percentiles) if duplicates: formatted_duplicates = ', '.join(repr(e) for e in duplicates) raise ValueError('Percentile values must be unique. The following' ' value(s) were duplicated: %s.' % formatted_duplicates) groups = np.unique(grouping) num_groups = len(groups) if num_groups == len(grouping): raise ValueError( "All values in `grouping` are unique. This method cannot " "operate on a grouping vector with only unique values (e.g., " "there are no 'within' variance because each group of samples " "contains only a single sample).") if num_groups == 1: raise ValueError( "All values the `grouping` are the same. This method cannot " "operate on a grouping vector with only a single group of samples" "(e.g., there are no 'between' variance because there is only a " "single group).") if significance_test is None: significance_test = scipy.stats.f_oneway table_index_len = len(table.index) grouping_index_len = len(grouping.index) mat, cats = table.align(grouping, axis=0, join='inner') if (len(mat) != table_index_len or len(cats) != grouping_index_len): raise ValueError('`table` index and `grouping` ' 'index must be consistent.') n_feat = mat.shape[1] _logratio_mat = _log_compare(mat.values, cats.values, significance_test) logratio_mat = _logratio_mat + _logratio_mat.T # Multiple comparisons if multiple_comparisons_correction == 'holm-bonferroni': logratio_mat = np.apply_along_axis(_holm_bonferroni, 1, logratio_mat) np.fill_diagonal(logratio_mat, 1) W = (logratio_mat < alpha).sum(axis=1) c_start = W.max() / n_feat if c_start < theta: reject = np.zeros_like(W, dtype=bool) else: # Select appropriate cutoff cutoff = c_start - np.linspace(0.05, 0.25, 5) prop_cut = np.array([(W > n_feat*cut).mean() for cut in cutoff]) dels = np.abs(prop_cut - np.roll(prop_cut, -1)) dels[-1] = 0 if (dels[0] < tau) and (dels[1] < tau) and (dels[2] < tau): nu = cutoff[1] elif (dels[0] >= tau) and (dels[1] < tau) and (dels[2] < tau): nu = 
cutoff[2] elif (dels[1] >= tau) and (dels[2] < tau) and (dels[3] < tau): nu = cutoff[3] else: nu = cutoff[4] reject = (W >= nu*n_feat) feat_ids = mat.columns ancom_df = pd.DataFrame( {'W': pd.Series(W, index=feat_ids), 'Reject null hypothesis': pd.Series(reject, index=feat_ids)}) if len(percentiles) == 0: return ancom_df, pd.DataFrame() else: data = [] columns = [] for group in groups: feat_dists = mat[cats == group] for percentile in percentiles: columns.append((percentile, group)) data.append(np.percentile(feat_dists, percentile, axis=0)) columns = pd.MultiIndex.from_tuples(columns, names=['Percentile', 'Group']) percentile_df = pd.DataFrame( np.asarray(data).T, columns=columns, index=feat_ids) return ancom_df, percentile_df def _holm_bonferroni(p): """ Performs Holm-Bonferroni correction for pvalues to account for multiple comparisons Parameters --------- p: numpy.array array of pvalues Returns ------- numpy.array corrected pvalues """ K = len(p) sort_index = -np.ones(K, dtype=np.int64) sorted_p = np.sort(p) sorted_p_adj = sorted_p*(K-np.arange(K)) for j in range(K): idx = (p == sorted_p[j]) & (sort_index < 0) num_ties = len(sort_index[idx]) sort_index[idx] = np.arange(j, (j+num_ties), dtype=np.int64) sorted_holm_p = [min([max(sorted_p_adj[:k]), 1]) for k in range(1, K+1)] holm_p = [sorted_holm_p[sort_index[k]] for k in range(K)] return holm_p def _log_compare(mat, cats, significance_test=scipy.stats.ttest_ind): """ Calculates pairwise log ratios between all features and performs a significiance test (i.e. t-test) to determine if there is a significant difference in feature ratios with respect to the variable of interest. Parameters ---------- mat: np.array rows correspond to samples and columns correspond to features (i.e. 
OTUs) cats: np.array, float Vector of categories significance_test: function statistical test to run Returns: -------- log_ratio : np.array log ratio pvalue matrix """ r, c = mat.shape log_ratio = np.zeros((c, c)) log_mat = np.log(mat) cs = np.unique(cats) def func(x): return significance_test(*[x[cats == k] for k in cs]) for i in range(c-1): ratio = (log_mat[:, i].T - log_mat[:, i+1:].T).T m, p = np.apply_along_axis(func, axis=0, arr=ratio) log_ratio[i, i+1:] = np.squeeze(np.array(p.T)) return log_ratio def _gram_schmidt_basis(n): """ Builds clr transformed basis derived from gram schmidt orthogonalization Parameters ---------- n : int Dimension of the Aitchison simplex """ basis = np.zeros((n, n-1)) for j in range(n-1): i = j + 1 e = np.array([(1/i)]*i + [-1] + [0]*(n-i-1))*np.sqrt(i/(i+1)) basis[:, j] = e return basis.T def _check_orthogonality(basis): """ Checks to see if basis is truly orthonormal in the Aitchison simplex Parameters ---------- basis: numpy.ndarray basis in the Aitchison simplex """ basis = np.atleast_2d(basis) if not np.allclose(inner(basis, basis), np.identity(len(basis)), rtol=1e-4, atol=1e-6): raise ValueError("Aitchison basis is not orthonormal")
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
__version__='3.3.0'
#modification of users/robin/ttflist.py.
__doc__="""This provides some general-purpose tools for finding fonts.

The FontFinder object can search for font files.  It aims to build
a catalogue of fonts which our framework can work with.  It may be useful
if you are building GUIs or design-time interfaces and want to present users
with a choice of fonts.

There are 3 steps to using it
1. create FontFinder and set options and directories
2. search
3. query

>>> import fontfinder
>>> ff = fontfinder.FontFinder()
>>> ff.addDirectories([dir1, dir2, dir3])
>>> ff.search()
>>> ff.getFamilyNames()   #or whichever queries you want...

Because the disk search takes some time to find and parse hundreds of fonts,
it can use a cache to store a file with all fonts found.  The cache file name
is derived from the set of search directories.

For each font found, it creates a structure with
- the short font name
- the long font name
- the principal file (.pfb for type 1 fonts), and the metrics file if
  appropriate
- the time modified (unix time stamp)
- a type code ('ttf')
- the family name
- bold and italic attributes

One common use is to display families in a dialog for end users;
then select regular, bold and italic variants of the font.  To get
the initial list, use getFamilyNames; these will be in alpha order.

>>> ff.getFamilyNames()
['Bitstream Vera Sans', 'Century Schoolbook L', 'Dingbats', 'LettErrorRobot',
'MS Gothic', 'MS Mincho', 'Nimbus Mono L', 'Nimbus Roman No9 L',
'Nimbus Sans L', 'Vera', 'Standard Symbols L',
'URW Bookman L', 'URW Chancery L', 'URW Gothic L', 'URW Palladio L']

One can then obtain a specific font as follows

>>> f = ff.getFont('Bitstream Vera Sans', bold=False, italic=True)
>>> f.fullName
'Bitstream Vera Sans'
>>> f.fileName
'C:\\code\\reportlab\\fonts\\Vera.ttf'
>>>

It can also produce an XML report of fonts found by family, for the benefit
of non-Python applications.

Future plans might include using this to auto-register fonts; and making it
update itself smartly on repeated instantiation.
"""
import sys, time, os, tempfile
from reportlab.lib.utils import pickle
from xml.sax.saxutils import quoteattr
try:
    from hashlib import md5
except ImportError:
    from md5 import md5

EXTENSIONS = ['.ttf','.ttc','.otf','.pfb','.pfa']

# PDF font flags (see PDF Reference Guide table 5.19)
FF_FIXED       = 1 << 1-1
FF_SERIF       = 1 << 2-1
FF_SYMBOLIC    = 1 << 3-1
FF_SCRIPT      = 1 << 4-1
FF_NONSYMBOLIC = 1 << 6-1
FF_ITALIC      = 1 << 7-1
FF_ALLCAP      = 1 << 17-1
FF_SMALLCAP    = 1 << 18-1
FF_FORCEBOLD   = 1 << 19-1

class FontDescriptor:
    """This is a short descriptive record about a font.

    typeCode should be a file extension e.g. ['ttf','ttc','otf','pfb','pfa']
    """
    def __init__(self):
        self.name = None
        self.fullName = None
        self.familyName = None
        self.styleName = None
        self.isBold = False     #true if it's somehow bold
        self.isItalic = False   #true if it's italic or oblique or somehow slanty
        self.isFixedPitch = False
        self.isSymbolic = False #false for Dingbats, Symbols etc.
        self.typeCode = None    #normally the extension minus the dot
        self.fileName = None    #full path to where we found it.
        self.metricsFileName = None  #defined only for type='type1pc', or 'type1mac'
        self.timeModified = 0

    def __repr__(self):
        return "FontDescriptor(%s)" % self.name

    def getTag(self):
        "Return an XML tag representation"
        attrs = []
        for k, v in self.__dict__.items():
            if k not in ['timeModified']:
                if v:
                    attrs.append('%s=%s' % (k, quoteattr(str(v))))
        return '<font ' + ' '.join(attrs) + '/>'

from reportlab.lib.utils import rl_isdir, rl_isfile, rl_listdir, rl_getmtime

class FontFinder:
    """Searches a set of directories for font files and catalogues them."""

    def __init__(self, dirs=[], useCache=True, validate=False):
        self.useCache = useCache
        self.validate = validate

        self._dirs = set(dirs)
        self._fonts = []

        self._skippedFiles = []  #list of filenames we did not handle
        self._badFiles = []      #list of filenames we rejected

        self._fontsByName = {}
        self._fontsByFamily = {}
        self._fontsByFamilyBoldItalic = {}  #indexed by bold, italic

    def addDirectory(self, dirName):
        """Add a single directory to the search path (if it exists)."""
        #aesthetics - if there are 2 copies of a font, should the first or last
        #be picked up?  might need reversing
        if rl_isdir(dirName):
            self._dirs.add(dirName)

    def addDirectories(self, dirNames):
        """Add several directories to the search path."""
        for dirName in dirNames:
            self.addDirectory(dirName)

    def getFamilyNames(self):
        "Returns a list of the distinct font families found"
        if not self._fontsByFamily:
            # lazily build the family -> fonts index on first query
            fonts = self._fonts
            for font in fonts:
                fam = font.familyName
                if fam in self._fontsByFamily:
                    self._fontsByFamily[fam].append(font)
                else:
                    self._fontsByFamily[fam] = [font]
        names = list(self._fontsByFamily.keys())
        names.sort()
        return names

    def getFontsInFamily(self, familyName):
        "Return list of all font objects with this family name"
        return self._fontsByFamily.get(familyName,[])

    def getFamilyXmlReport(self):
        """Reports on all families found as XML.
        """
        lines = []
        lines.append('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>')
        lines.append("<font_families>")
        for dirName in self._dirs:
            lines.append("    <directory name=%s/>" % quoteattr(dirName))
        for familyName in self.getFamilyNames():
            if familyName:  #skip null case
                lines.append('    <family name=%s>' % quoteattr(familyName))
                for font in self.getFontsInFamily(familyName):
                    lines.append('        ' + font.getTag())
                lines.append('    </family>')
        lines.append("</font_families>")
        return '\n'.join(lines)

    def getFontsWithAttributes(self, **kwds):
        """This is a general lightweight search."""
        selected = []
        for font in self._fonts:
            OK = True
            for k, v in kwds.items():
                if getattr(font, k, None) != v:
                    OK = False
            if OK:
                selected.append(font)
        return selected

    def getFont(self, familyName, bold=False, italic=False):
        """Try to find a font matching the spec"""
        for font in self._fonts:
            if font.familyName == familyName:
                if font.isBold == bold:
                    if font.isItalic == italic:
                        return font
        raise KeyError("Cannot find font %s with bold=%s, italic=%s" %
                       (familyName, bold, italic))

    def _getCacheFileName(self):
        """Base this on the directories...same set of directories
        should give same cache"""
        # BUGFIX: sort the directory set so the digest is stable across runs
        # (set iteration order is arbitrary), and encode to bytes as required
        # by hashlib under Python 3.
        digest = md5(''.join(sorted(self._dirs)).encode('utf-8')).hexdigest()
        from reportlab.lib.utils import get_rl_tempfile
        fn = get_rl_tempfile('fonts_%s.dat' % digest)
        return fn

    def save(self, fileName):
        """Pickle the finder state to fileName."""
        # BUGFIX: pickles must be written in binary mode on Python 3.
        f = open(fileName, 'wb')
        pickle.dump(self, f)
        f.close()

    def load(self, fileName):
        """Restore the finder state from a pickle written by save()."""
        # BUGFIX: pickles must be read in binary mode on Python 3.
        f = open(fileName, 'rb')
        finder2 = pickle.load(f)
        f.close()
        self.__dict__.update(finder2.__dict__)

    def search(self):
        """Scan all directories for font files, using the cache if enabled."""
        # BUGFIX: time.clock() was removed in Python 3.8; use time.time().
        started = time.time()
        if not self._dirs:
            raise ValueError("Font search path is empty!  Please specify search directories using addDirectory or addDirectories")

        if self.useCache:
            cfn = self._getCacheFileName()
            if rl_isfile(cfn):
                try:
                    self.load(cfn)
                    #print "loaded cached file with %d fonts (%s)" % (len(self._fonts), cfn)
                    return
                except Exception:
                    pass  #pickle load failed.  Ho hum, maybe it's an old pickle.  Better rebuild it.

        from stat import ST_MTIME
        for dirName in self._dirs:
            fileNames = rl_listdir(dirName)
            for fileName in fileNames:
                root, ext = os.path.splitext(fileName)
                if ext.lower() in EXTENSIONS:
                    #it's a font
                    f = FontDescriptor()
                    f.fileName = os.path.normpath(os.path.join(dirName, fileName))
                    f.timeModified = rl_getmtime(f.fileName)

                    ext = ext.lower()
                    if ext[0] == '.':
                        ext = ext[1:]
                    f.typeCode = ext  #strip the dot

                    #what to do depends on type.  We only accept .pfb if we
                    #have .afm to go with it, and don't handle .otf now.
                    if ext in ('otf', 'pfa'):
                        self._skippedFiles.append(fileName)
                    elif ext in ('ttf','ttc'):
                        #parsing should check it for us
                        from reportlab.pdfbase.ttfonts import TTFontFile, TTFError
                        try:
                            # BUGFIX: parse the full normalized path, not the
                            # bare name (which only resolved when cwd happened
                            # to be the scanned directory).
                            font = TTFontFile(f.fileName, validate=self.validate)
                        except TTFError:
                            self._badFiles.append(fileName)
                            continue
                        f.name = font.name
                        f.fullName = font.fullName
                        f.styleName = font.styleName
                        f.familyName = font.familyName
                        f.isBold = (FF_FORCEBOLD == FF_FORCEBOLD & font.flags)
                        f.isItalic = (FF_ITALIC == FF_ITALIC & font.flags)
                        # BUGFIX: only append recognized fonts; previously the
                        # append ran for skipped otf/pfa files too, adding
                        # nameless descriptors to the catalogue.
                        self._fonts.append(f)
                    elif ext == 'pfb':
                        # type 1; we need an AFM file or have to skip.
                        if rl_isfile(os.path.join(dirName, root + '.afm')):
                            f.metricsFileName = os.path.normpath(os.path.join(dirName, root + '.afm'))
                        elif rl_isfile(os.path.join(dirName, root + '.AFM')):
                            f.metricsFileName = os.path.normpath(os.path.join(dirName, root + '.AFM'))
                        else:
                            self._skippedFiles.append(fileName)
                            continue
                        from reportlab.pdfbase.pdfmetrics import parseAFMFile

                        (info, glyphs) = parseAFMFile(f.metricsFileName)
                        f.name = info['FontName']
                        f.fullName = info.get('FullName', f.name)
                        f.familyName = info.get('FamilyName', None)
                        f.isItalic = (float(info.get('ItalicAngle', 0)) > 0.0)
                        #if the weight has the word bold, deem it bold
                        f.isBold = ('bold' in info.get('Weight','').lower())
                        self._fonts.append(f)
        if self.useCache:
            self.save(cfn)

        finished = time.time()
##        print "found %d fonts; skipped %d; bad %d.  Took %0.2f seconds" % (
##            len(self._fonts), len(self._skippedFiles), len(self._badFiles),
##            finished - started
##            )

def test():
    #windows-centric test maybe
    from reportlab import rl_config
    ff = FontFinder()
    ff.useCache = True
    ff.validate = True

    import reportlab
    ff.addDirectory('C:\\windows\\fonts')
    rlFontDir = os.path.join(os.path.dirname(reportlab.__file__), 'fonts')
    ff.addDirectory(rlFontDir)
    ff.search()

    print('cache file name...')
    print(ff._getCacheFileName())

    print('families...')
    for familyName in ff.getFamilyNames():
        print('\t%s' % familyName)

    print()
    outw = sys.stdout.write
    outw('fonts called Vera:')
    for font in ff.getFontsInFamily('Bitstream Vera Sans'):
        outw(' %s' % font.name)
    print()
    outw('Bold fonts\n\t')
    for font in ff.getFontsWithAttributes(isBold=True, isItalic=False):
        outw(font.fullName+' ')
    print()
    print('family report')
    print(ff.getFamilyXmlReport())

if __name__=='__main__':
    test()
# -*- coding: utf-8 -*-

# Copyright 2011 Mats Ekberg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Block-level deduplication support.  Uses the optional 'cdedup' C
# extension when available; otherwise falls back to the Fake* no-op
# classes below so the rest of the system works without deduplication.
# NOTE(review): this module uses Python 2 idioms (`long`, iterator `next`
# methods) — presumably targets Python 2; confirm before porting.

from __future__ import with_statement

from common import *
from ordered_dict import OrderedDict
from jsonrpc import FileDataSource

import sys
import tempfile
import array

try:
    # The env var gives an explicit escape hatch for disabling dedup.
    if os.getenv("BOAR_DISABLE_DEDUP") == "1":
        raise ImportError()
    import cdedup
    # NOTE(review): the assertion message references `rollingcs`, which is
    # not defined in this module — if the version check ever fails this
    # raises NameError instead of AssertionError; looks like a stale name
    # for cdedup.__version__.
    assert cdedup.__version__ == "1.0", "Unexpected deduplication module version (was: %s)" % rollingcs.__version__
    cdedup_version = cdedup.__version__
    from cdedup import RollingChecksum, calc_rolling, IntegerSet, BlocksDB
    dedup_available = True
except ImportError:
    cdedup_version = None
    dedup_available = False


def CreateIntegerSet(ints):
    """This method will return an IntegerSet containing the given
    ints. The IntegerSet is suitable for use with the RecipeFinder
    class. An IntegerSet created with this function should not be used
    for other purposes.

    Note that in case the c extension is unavailable (dedup_available
    == False), then a FakeIntegerSet instance will be returned
    instead."""
    # bucket count must be a power of two
    if dedup_available:
        # At least 100000 buckets to keep the hash table sparse.
        intset = IntegerSet(max(len(ints), 100000))
    else:
        intset = FakeIntegerSet(len(ints))
    intset.add_all(ints)
    return intset


class FakeRollingChecksum:
    """This is a dummy version of RollingChecksum. An instance of this
    class will never report any hits."""
    def __init__(self, window_size, intset):
        pass

    def feed_string(self, s):
        pass

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol: an empty iterator.
        raise StopIteration()


class FakeIntegerSet:
    """This is a dummy version of IntegerSet. An instance of this
    class will always return False when contains() is called."""
    def __init__(self, bucket_count):
        pass

    def add_all(self, integers):
        pass

    def contains(self, n):
        return False


class FakeBlockChecksum:
    """This is a dummy version of BlockChecksum. An instance of this
    class will always return an empty list when harvest() is
    called."""
    def __init__(self, window_size):
        pass

    def feed_string(self, s):
        pass

    def harvest(self):
        return []


class TmpBlocksDB:
    # Wraps a persistent BlocksDB and overlays blocks that exist only
    # in memory (not yet committed to the database).
    def __init__(self, blocksdb):
        self.blocksdb = blocksdb
        self.blocks = {} # md5 -> [(blob, offset), ...]

    def add_tmp_block(self, md5, blob, offset):
        assert is_md5sum(md5)
        assert is_md5sum(blob)
        if md5 not in self.blocks:
            self.blocks[md5] = []
        self.blocks[md5].append((blob, offset))

    def get_block_size(self):
        return self.blocksdb.get_block_size()

    def get_block_locations(self, md5, limit = -1):
        # In-memory locations first, then the persistent ones.
        return self.blocks.get(md5, []) + self.blocksdb.get_block_locations(md5, limit)

    def has_block(self, md5):
        return md5 in self.blocks or self.blocksdb.has_block(md5)


class FakeBlocksDB:
    # No-op stand-in for BlocksDB, used when dedup is unavailable.
    def __init__(self, dbfile, block_size):
        self.block_size = block_size

    def get_all_rolling(self):
        return []

    def has_block(self, md5):
        return False

    def get_block_locations(self, md5, limit = -1):
        return []

    def add_rolling(self, rolling):
        pass

    def delete_blocks(self, blobs):
        pass

    def add_block(self, blob, offset, md5):
        pass

    def begin(self):
        pass

    def commit(self):
        pass

    def get_block_size(self):
        return self.block_size


class BlockChecksum:
    # Cuts the fed data into fixed-size, non-overlapping windows and
    # records (position, rolling checksum, md5) for each full window.
    def __init__(self, window_size):
        self.buffer = TailBuffer()
        self.window_size = window_size
        self.position = 0
        self.blocks = []

    def feed_string(self, s):
        self.buffer.append(s)
        # Process every complete window currently in the buffer.
        while len(self.buffer) - self.position >= self.window_size:
            block = self.buffer[self.position:self.position+self.window_size]
            block_md5 = md5sum(block)
            block_rolling = calc_rolling(block, self.window_size)
            self.blocks.append((self.position, block_rolling, block_md5))
            self.position += self.window_size
        # Free the consumed prefix of the buffer.
        self.buffer.release(self.position)

    def harvest(self):
        # Hand over the accumulated blocks and reset.
        result = self.blocks
        self.blocks = []
        return result

# BlockChecksum depends on calc_rolling from the C extension; remove the
# class entirely when the extension is missing so misuse fails fast.
if not dedup_available:
    del BlockChecksum


class UniformBlobGetter:
    # Resolves blob reads either from a local directory (if the blob
    # exists there) or from the repository.
    def __init__(self, repo, local_blob_dir = None):
        self.repo = repo
        self.local_blob_dir = local_blob_dir

    def get_blob_size(self, blob_name):
        assert is_md5sum(blob_name)
        if self.local_blob_dir:
            local_path = os.path.join(self.local_blob_dir, blob_name)
            if os.path.exists(local_path):
                return long(os.path.getsize(local_path))
        return self.repo.get_blob_size(blob_name)

    def get_blob_reader(self, blob_name, offset, size):
        assert is_md5sum(blob_name)
        if self.local_blob_dir:
            local_path = os.path.join(self.local_blob_dir, blob_name)
            if os.path.exists(local_path):
                fo = safe_open(local_path, "rb")
                fo.seek(offset)
                return FileDataSource(fo, size)
        return self.repo.get_blob_reader(blob_name, offset, size)


class OriginalPieceHandler:
    # Callback interface used by RecipeFinder for stretches of data that
    # could not be deduplicated ("original" pieces).
    def init_piece(self, index):
        pass

    def add_piece_data(self, index, data):
        pass

    def end_piece(self, index):
        pass

    def close(self):
        pass

    def get_piece_address(self, index):
        """After the handler has been closed, this method must return
        a tuple of (blob, offset) for every piece that has been
        processed. Those values will be used for this piece in the
        recipe."""
        pass

from statemachine import GenericStateMachine

# RecipeFinder state machine: the scanner is always either inside a run
# of original (non-matching) data or inside a run of deduplicated
# (matching) blocks.
START_STATE = "START"
ORIGINAL_STATE = "ORIGINAL"
DEDUP_STATE = "MATCH"
END_STATE = "END"

ORIGINAL_DATA_FOUND_EVENT = "ORIGINAL_EVENT"
DEDUP_BLOCK_FOUND_EVENT = "DEDUP_BLOCK_FOUND_EVENT"
EOF_EVENT = "EOF_EVENT"


class RecipeFinder(GenericStateMachine):
    # Scans a data stream, finds blocks already present in the blocks
    # database, and builds a recipe of matched blocks plus original
    # pieces.
    def __init__(self, blocksdb, block_size, intset, blob_source,
                 original_piece_handler, tmpdir = None,
                 RollingChecksumClass = None):
        GenericStateMachine.__init__(self)
        self.blob_source = blob_source
        if not RollingChecksumClass:
            RollingChecksumClass = RollingChecksum

        # State machine init
        self.add_state(START_STATE)
        self.add_state(ORIGINAL_STATE)
        self.add_state(DEDUP_STATE)
        self.add_state(END_STATE)

        self.add_event(ORIGINAL_DATA_FOUND_EVENT)
        self.add_event(DEDUP_BLOCK_FOUND_EVENT)
        self.add_event(EOF_EVENT)

        self.add_transition(START_STATE, ORIGINAL_DATA_FOUND_EVENT, ORIGINAL_STATE)
        self.add_transition(START_STATE, DEDUP_BLOCK_FOUND_EVENT, DEDUP_STATE)
        self.add_transition(ORIGINAL_STATE, ORIGINAL_DATA_FOUND_EVENT, ORIGINAL_STATE)
        self.add_transition(DEDUP_STATE, DEDUP_BLOCK_FOUND_EVENT, DEDUP_STATE)
        self.add_transition(ORIGINAL_STATE, DEDUP_BLOCK_FOUND_EVENT, DEDUP_STATE)
        self.add_transition(DEDUP_STATE, ORIGINAL_DATA_FOUND_EVENT, ORIGINAL_STATE)
        self.add_transition(ORIGINAL_STATE, EOF_EVENT, END_STATE)
        self.add_transition(DEDUP_STATE, EOF_EVENT, END_STATE)
        self.add_transition(START_STATE, EOF_EVENT, END_STATE)

        # Sanity check: event offsets must never move backwards.
        self.last_seen_offset = 0
        def assert_offset_ok(**args):
            assert self.last_seen_offset <= args['offset'], args['offset']
            self.last_seen_offset = args['offset']

        self.add_exit_handler(START_STATE, assert_offset_ok)
        self.add_exit_handler(ORIGINAL_STATE, assert_offset_ok)
        self.add_exit_handler(DEDUP_STATE, assert_offset_ok)

        # Handlers firing on the boundaries between original and dedup runs.
        self.add_transition_handler(START_STATE, ORIGINAL_DATA_FOUND_EVENT, ORIGINAL_STATE, self.__on_original_data_start)
        self.add_transition_handler(DEDUP_STATE, ORIGINAL_DATA_FOUND_EVENT, ORIGINAL_STATE, self.__on_original_data_start)
        self.add_transition_handler(ORIGINAL_STATE, EOF_EVENT, END_STATE, self.__on_original_data_end)
        self.add_transition_handler(ORIGINAL_STATE, DEDUP_BLOCK_FOUND_EVENT, DEDUP_STATE, self.__on_original_data_end)

        self.add_transition_handler(START_STATE, DEDUP_BLOCK_FOUND_EVENT, DEDUP_STATE, self.__on_dedup_data_start)
        self.add_transition_handler(ORIGINAL_STATE, DEDUP_BLOCK_FOUND_EVENT, DEDUP_STATE, self.__on_dedup_data_start)
        self.add_transition_handler(DEDUP_STATE, ORIGINAL_DATA_FOUND_EVENT, ORIGINAL_STATE, self.__on_dedup_data_end)
        self.add_transition_handler(DEDUP_STATE, EOF_EVENT, END_STATE, self.__on_dedup_data_end)

        # An empty stream yields a single zero-length original piece.
        self.add_transition_handler(START_STATE, EOF_EVENT, END_STATE, self.__on_original_data_start)
        self.add_transition_handler(START_STATE, EOF_EVENT, END_STATE, self.__on_original_data_end)

        self.add_enter_handler(DEDUP_STATE, self.__on_dedup_block)
        self.add_exit_handler(ORIGINAL_STATE, self.__on_original_data_part_end)
        self.add_enter_handler(END_STATE, self.__on_end_of_file)

        self.start(START_STATE)

        self.blocksdb = blocksdb
        self.block_size = block_size
        self.rs = RollingChecksumClass(block_size, intset)
        self.original_piece_handler = original_piece_handler

        self.tail_buffer = TailBuffer()
        self.end_of_last_hit = 0 # The end of the last matched block
        self.last_flush_end = 0
        self.md5summer = hashlib.md5()
        # Verifies that the reconstructed stream matches the input.
        self.restored_md5summer = hashlib.md5()
        self.closed = False
        self.feed_byte_count = 0

        self.sequences = []
        self.seq_number = -1

        self.recipe = None

    def __on_original_data_start(self, **args):
        self.original_start = args['offset']
        self.last_flush_end = args['offset']
        self.seq_number += 1
        self.original_piece_handler.init_piece(self.seq_number)

    def __on_original_data_part_end(self, **args):
        #print args
        #print "Releasing ", self.last_flush_end
        self.tail_buffer.release(self.last_flush_end)
        # Flush the original data accumulated since the last flush point.
        data = self.tail_buffer[self.last_flush_end : args['offset']]
        self.last_flush_end = args['offset']
        self.end_of_last_hit = args['offset']
self.original_piece_handler.add_piece_data(self.seq_number, data) self.restored_md5summer.update(data) #print "Flushing", len(data), "bytes of original data" def __on_original_data_end(self, **args): size = args['offset'] - self.original_start del self.original_start del self.last_flush_end self.original_piece_handler.end_piece(self.seq_number) self.sequences.append(Struct(piece_handler = self.original_piece_handler, piece_index = self.seq_number, piece_size = size)) def __on_dedup_data_start(self, **args): self.seq_number += 1 self.sequences.append([]) def __on_dedup_block(self, **args): self.sequences[-1].append(args['md5']) self.end_of_last_hit = args['offset'] + self.block_size self.restored_md5summer.update(args['block_data']) def __on_dedup_data_end(self, **args): pass def __on_end_of_file(self, **args): pass def feed(self, s): #print "Feeding", len(s), "bytes" assert type(s) == str assert not self.closed self.feed_byte_count += len(s) self.rs.feed_string(s) self.md5summer.update(s) self.tail_buffer.append(s) for offset, rolling in self.rs: if offset < self.end_of_last_hit: # Ignore overlapping blocks continue block_data = self.tail_buffer[offset : offset + self.block_size] md5 = md5sum(block_data) self.end_of_last_hit >= 0 if self.blocksdb.has_block(md5): assert self.end_of_last_hit >= 0 if offset - self.end_of_last_hit > 0: # If this hit is NOT a continuation of the last # one, there must be original data in between. #print "Gap found between block hits" self.dispatch(ORIGINAL_DATA_FOUND_EVENT, offset = self.end_of_last_hit) self.dispatch(DEDUP_BLOCK_FOUND_EVENT, md5 = md5, offset = offset, block_data = block_data) #print "State after feeding is", self.get_state() # We know here that all data, except the last block_size # bytes (which may still be part of a hit when we feed # more data), are original. Let's tell the state machine # that. By doing this, we chop up the sequence, as opposed # to just doing one unpredictably huge sequence at the # end. 
# print "Half-time flush!" if self.end_of_last_hit < self.feed_byte_count - self.block_size: # print "Last hit leaves a gap - state is", self.get_state() if self.get_state() != ORIGINAL_STATE: self.dispatch(ORIGINAL_DATA_FOUND_EVENT, offset = self.end_of_last_hit) #print "Before flush:", self.get_state() self.dispatch(ORIGINAL_DATA_FOUND_EVENT, offset = self.feed_byte_count - self.block_size) #print "Half-time flush complete" def close(self): #print "Closing" # TODO: This stuff should be moved to on_file_end() assert not self.closed self.closed = True if self.end_of_last_hit != self.feed_byte_count: offset = max(self.feed_byte_count - self.block_size, self.end_of_last_hit) self.dispatch(ORIGINAL_DATA_FOUND_EVENT, offset = offset) self.dispatch(EOF_EVENT,offset = self.feed_byte_count) assert len(self.tail_buffer) == self.feed_byte_count assert self.get_state() == END_STATE assert self.restored_md5summer.hexdigest() == self.md5summer.hexdigest() self.original_piece_handler.close() #print_recipe(self.get_recipe()) restored_size = 0 for piece in self.get_recipe()['pieces']: restored_size += piece['size'] * piece['repeat'] assert restored_size == self.feed_byte_count, "Restored is %s, feeded is %s" % (restored_size, self.feed_byte_count) del self.rs def __seq2rec(self): restored_size = 0 def get_dict(source, offset, size, original): #assert is_md5sum(source) assert offset >= 0 assert size >= 0 assert type(original) == bool return OrderedDict([("source", source), ("offset", offset), ("size", size), ("original", original), ("repeat", 1)]) for s in self.sequences: if isinstance(s, Struct): # Original data restored_size += s.piece_size blob, offset = s.piece_handler.get_piece_address(s.piece_index) yield get_dict(blob, offset, s.piece_size, True) elif type(s) == list: # Duplicated data assert s seqfinder = BlockSequenceFinder(self.blocksdb) for md5 in s: if not seqfinder.can_add(md5): blob, offset, size = seqfinder.get_matches().next() restored_size += size yield 
get_dict(blob, offset, size, False) seqfinder = BlockSequenceFinder(self.blocksdb) seqfinder.add_block(md5) matches = list(seqfinder.get_matches()) # We only need one if matches: blob, offset, size = matches[0] restored_size += size yield get_dict(blob, offset, size, False) else: assert False, s assert restored_size == self.feed_byte_count def __polish_recipe_tail(self): assert self.recipe pieces = self.recipe['pieces'] if len(pieces) < 2: return if not (pieces[-1]['original'] == True and pieces[-2]['original'] == False): return # The last piece is original, and the second to last piece is # not. It could be possible to extend the last hit all the way. blob = pieces[-2]['source'] required_blob_size = pieces[-2]['offset'] + pieces[-2]['size'] + pieces[-1]['size'] if self.blob_source.get_blob_size(blob) < required_blob_size: # Cannot possibly be a full hit return blob_start = pieces[-2]['offset'] + pieces[-2]['size'] blob_read_size = pieces[-1]['size'] data1 = self.blob_source.get_blob_reader(pieces[-1]['source'], pieces[-1]['offset'], blob_read_size).read() data2 = self.blob_source.get_blob_reader(blob, blob_start, blob_read_size).read() if data1 != data2: return # We can extend! 
pieces[-2]['size'] += pieces[-1]['size'] del pieces[-1] def __polish_recipe_repeats(self): assert self.recipe pieces = self.recipe['pieces'] if len(pieces) == 0: return new_pieces = [pieces.pop(0)] assert new_pieces[-1]['repeat'] == 1 for piece in pieces: assert piece['repeat'] == 1 if new_pieces[-1]['source'] == piece['source'] and \ new_pieces[-1]['size'] == piece['size'] and \ new_pieces[-1]['offset'] == piece['offset'] and \ new_pieces[-1]['original'] == piece['original'] and \ new_pieces[-1]['original'] == False: new_pieces[-1]['repeat'] += 1 else: new_pieces.append(piece) self.recipe['pieces'] = new_pieces def get_recipe(self): assert self.closed if self.recipe == None: self.recipe = OrderedDict([("md5sum", self.md5summer.hexdigest()), ("size", len(self.tail_buffer)), ("method", "concat"), ("pieces", list(self.__seq2rec()))]) # We now have a complete and useful recipe. But can it be improved? self.__polish_recipe_tail() self.__polish_recipe_repeats() return self.recipe class BlockSequenceFinder: def __init__(self, blocksdb): self.blocksdb = blocksdb # The candidates are tuples on the form (blob, offset), where # offset is the end of the last matched block. self.candidates = set() self.feeded_blocks = 0 self.firstblock = True self.block_size = blocksdb.get_block_size() def get_matches(self): length = self.block_size * self.feeded_blocks for blob, end_pos in sorted(self.candidates): # By sorting, we get a predictable order which makes # testing easier. As a secondary effect, we also # concentrate the hits to fewer blobs (the ones with lower # blob-ids), which may have positive cache effects on # access. 
start_pos = end_pos - length assert start_pos >= 0 yield blob, start_pos, length def can_add(self, block_md5): return self.firstblock or bool(self.__filter_and_extend_candidates(block_md5)) def __filter_and_extend_candidates(self, block_md5): """ Returns the candidates that can be extended with the given block.""" surviving_candidates = set() for block in self.candidates.intersection(set(self.blocksdb.get_block_locations(block_md5))): blob, offset = block surviving_candidates.add((blob, offset + self.block_size)) return surviving_candidates def add_block(self, block_md5): self.feeded_blocks += 1 if self.firstblock: self.firstblock = False for blob, offset in self.blocksdb.get_block_locations(block_md5): self.candidates.add((blob, offset + self.block_size)) else: self.candidates = self.__filter_and_extend_candidates(block_md5) assert self.candidates, "No remaining candidates" #print "Candidates are", list(self.get_matches()) def print_recipe(recipe): print "Md5sum:", recipe["md5sum"] print "Size :", recipe["size"] print "Method:", recipe['method'] print "Pieces:", len(recipe['pieces']) pos = 0 dedup_size = 0 for p in recipe['pieces']: print " Original :", p['original'] print " Source blob :", p['source'] if p['source'] else "SELF" print " Source offset:", p['offset'] print " Size :", p['size'] print " Position : %s - %s" % (hex(pos), hex(pos + p['size'] * p.get('repeat', 1))) print " Repeat :", p.get('repeat', "(1)") print " ---------------" pos += p['size'] if p['source'] == None: # Count the parts we couldn't find elsewhere dedup_size += p['size'] try: print "Dedup removed %s%% of original size" % round((100.0 * (1.0 - float(dedup_size) / recipe["size"])), 1) except ZeroDivisionError: print "Zero size recipe" pass def benchmark(): import time b = BlockChecksum(2**16) data = "x" * 12345 t0 = time.time() size = 0 for n in range(0,10000): b.feed_string(data) size += len(data) b.harvest() print size, time.time() - t0 #res=cProfile.run('main()', "prof.txt") #import 
pstats #p = pstats.Stats('prof.txt') #p.sort_stats('cum').print_stats(20) #sys.exit(res) def main(): import cProfile, pstats res=cProfile.run('benchmark()', "deduplication_prof.txt") p = pstats.Stats('deduplication_prof.txt') p.sort_stats('cum').print_stats(20) #main() #benchmark()
# -*- coding: utf-8 -*- from __future__ import print_function import operator import pytest from pandas.compat import (zip, range, lrange, StringIO) from pandas import DataFrame, Series, Index, MultiIndex, date_range import pandas as pd import numpy as np from numpy.random import randn from pandas.util.testing import (assert_series_equal, assert_frame_equal, makeCustomDataframe as mkdf) import pandas.util.testing as tm from pandas.core.computation.check import _NUMEXPR_INSTALLED from pandas.tests.frame.common import TestData PARSERS = 'python', 'pandas' ENGINES = 'python', 'numexpr' @pytest.fixture(params=PARSERS, ids=lambda x: x) def parser(request): return request.param @pytest.fixture(params=ENGINES, ids=lambda x: x) def engine(request): return request.param def skip_if_no_pandas_parser(parser): if parser != 'pandas': pytest.skip("cannot evaluate with parser {0!r}".format(parser)) def skip_if_no_ne(engine='numexpr'): if engine == 'numexpr': if not _NUMEXPR_INSTALLED: pytest.skip("cannot query engine numexpr when numexpr not " "installed") class TestCompat(object): def setup_method(self, method): self.df = DataFrame({'A': [1, 2, 3]}) self.expected1 = self.df[self.df.A > 0] self.expected2 = self.df.A + 1 def test_query_default(self): # GH 12749 # this should always work, whether _NUMEXPR_INSTALLED or not df = self.df result = df.query('A>0') assert_frame_equal(result, self.expected1) result = df.eval('A+1') assert_series_equal(result, self.expected2, check_names=False) def test_query_None(self): df = self.df result = df.query('A>0', engine=None) assert_frame_equal(result, self.expected1) result = df.eval('A+1', engine=None) assert_series_equal(result, self.expected2, check_names=False) def test_query_python(self): df = self.df result = df.query('A>0', engine='python') assert_frame_equal(result, self.expected1) result = df.eval('A+1', engine='python') assert_series_equal(result, self.expected2, check_names=False) def test_query_numexpr(self): df = self.df if 
_NUMEXPR_INSTALLED: result = df.query('A>0', engine='numexpr') assert_frame_equal(result, self.expected1) result = df.eval('A+1', engine='numexpr') assert_series_equal(result, self.expected2, check_names=False) else: pytest.raises(ImportError, lambda: df.query('A>0', engine='numexpr')) pytest.raises(ImportError, lambda: df.eval('A+1', engine='numexpr')) class TestDataFrameEval(TestData): def test_ops(self): # tst ops and reversed ops in evaluation # GH7198 # smaller hits python, larger hits numexpr for n in [4, 4000]: df = DataFrame(1, index=range(n), columns=list('abcd')) df.iloc[0] = 2 m = df.mean() for op_str, op, rop in [('+', '__add__', '__radd__'), ('-', '__sub__', '__rsub__'), ('*', '__mul__', '__rmul__'), ('/', '__truediv__', '__rtruediv__')]: base = (DataFrame(np.tile(m.values, n) # noqa .reshape(n, -1), columns=list('abcd'))) expected = eval("base{op}df".format(op=op_str)) # ops as strings result = eval("m{op}df".format(op=op_str)) assert_frame_equal(result, expected) # these are commutative if op in ['+', '*']: result = getattr(df, op)(m) assert_frame_equal(result, expected) # these are not elif op in ['-', '/']: result = getattr(df, rop)(m) assert_frame_equal(result, expected) # GH7192 df = DataFrame(dict(A=np.random.randn(25000))) df.iloc[0:5] = np.nan expected = (1 - np.isnan(df.iloc[0:25])) result = (1 - np.isnan(df)).iloc[0:25] assert_frame_equal(result, expected) def test_query_non_str(self): # GH 11485 df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'b']}) msg = "expr must be a string to be evaluated" with tm.assert_raises_regex(ValueError, msg): df.query(lambda x: x.B == "b") with tm.assert_raises_regex(ValueError, msg): df.query(111) def test_query_empty_string(self): # GH 13139 df = pd.DataFrame({'A': [1, 2, 3]}) msg = "expr cannot be an empty string" with tm.assert_raises_regex(ValueError, msg): df.query('') def test_eval_resolvers_as_list(self): # GH 14095 df = DataFrame(randn(10, 2), columns=list('ab')) dict1 = {'a': 1} dict2 = {'b': 2} 
assert (df.eval('a + b', resolvers=[dict1, dict2]) == dict1['a'] + dict2['b']) assert (pd.eval('a + b', resolvers=[dict1, dict2]) == dict1['a'] + dict2['b']) class TestDataFrameQueryWithMultiIndex(object): def test_query_with_named_multiindex(self, parser, engine): tm.skip_if_no_ne(engine) skip_if_no_pandas_parser(parser) a = np.random.choice(['red', 'green'], size=10) b = np.random.choice(['eggs', 'ham'], size=10) index = MultiIndex.from_arrays([a, b], names=['color', 'food']) df = DataFrame(randn(10, 2), index=index) ind = Series(df.index.get_level_values('color').values, index=index, name='color') # equality res1 = df.query('color == "red"', parser=parser, engine=engine) res2 = df.query('"red" == color', parser=parser, engine=engine) exp = df[ind == 'red'] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) # inequality res1 = df.query('color != "red"', parser=parser, engine=engine) res2 = df.query('"red" != color', parser=parser, engine=engine) exp = df[ind != 'red'] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) # list equality (really just set membership) res1 = df.query('color == ["red"]', parser=parser, engine=engine) res2 = df.query('["red"] == color', parser=parser, engine=engine) exp = df[ind.isin(['red'])] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) res1 = df.query('color != ["red"]', parser=parser, engine=engine) res2 = df.query('["red"] != color', parser=parser, engine=engine) exp = df[~ind.isin(['red'])] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) # in/not in ops res1 = df.query('["red"] in color', parser=parser, engine=engine) res2 = df.query('"red" in color', parser=parser, engine=engine) exp = df[ind.isin(['red'])] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) res1 = df.query('["red"] not in color', parser=parser, engine=engine) res2 = df.query('"red" not in color', parser=parser, engine=engine) exp = df[~ind.isin(['red'])] assert_frame_equal(res1, exp) assert_frame_equal(res2, 
exp) def test_query_with_unnamed_multiindex(self, parser, engine): tm.skip_if_no_ne(engine) skip_if_no_pandas_parser(parser) a = np.random.choice(['red', 'green'], size=10) b = np.random.choice(['eggs', 'ham'], size=10) index = MultiIndex.from_arrays([a, b]) df = DataFrame(randn(10, 2), index=index) ind = Series(df.index.get_level_values(0).values, index=index) res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine) res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine) exp = df[ind == 'red'] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) # inequality res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine) res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine) exp = df[ind != 'red'] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) # list equality (really just set membership) res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine) res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine) exp = df[ind.isin(['red'])] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine) res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine) exp = df[~ind.isin(['red'])] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) # in/not in ops res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine) res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine) exp = df[ind.isin(['red'])] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine) res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine) exp = df[~ind.isin(['red'])] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) # ## LEVEL 1 ind = Series(df.index.get_level_values(1).values, index=index) res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine) res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine) exp = 
df[ind == 'eggs'] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) # inequality res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine) res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine) exp = df[ind != 'eggs'] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) # list equality (really just set membership) res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine) res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine) exp = df[ind.isin(['eggs'])] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine) res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine) exp = df[~ind.isin(['eggs'])] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) # in/not in ops res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine) res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine) exp = df[ind.isin(['eggs'])] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) res1 = df.query('["eggs"] not in ilevel_1', parser=parser, engine=engine) res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine) exp = df[~ind.isin(['eggs'])] assert_frame_equal(res1, exp) assert_frame_equal(res2, exp) def test_query_with_partially_named_multiindex(self, parser, engine): tm.skip_if_no_ne(engine) skip_if_no_pandas_parser(parser) a = np.random.choice(['red', 'green'], size=10) b = np.arange(10) index = MultiIndex.from_arrays([a, b]) index.names = [None, 'rating'] df = DataFrame(randn(10, 2), index=index) res = df.query('rating == 1', parser=parser, engine=engine) ind = Series(df.index.get_level_values('rating').values, index=index, name='rating') exp = df[ind == 1] assert_frame_equal(res, exp) res = df.query('rating != 1', parser=parser, engine=engine) ind = Series(df.index.get_level_values('rating').values, index=index, name='rating') exp = df[ind != 1] assert_frame_equal(res, exp) res = 
df.query('ilevel_0 == "red"', parser=parser, engine=engine) ind = Series(df.index.get_level_values(0).values, index=index) exp = df[ind == "red"] assert_frame_equal(res, exp) res = df.query('ilevel_0 != "red"', parser=parser, engine=engine) ind = Series(df.index.get_level_values(0).values, index=index) exp = df[ind != "red"] assert_frame_equal(res, exp) def test_query_multiindex_get_index_resolvers(self): df = mkdf(10, 3, r_idx_nlevels=2, r_idx_names=['spam', 'eggs']) resolvers = df._get_index_resolvers() def to_series(mi, level): level_values = mi.get_level_values(level) s = level_values.to_series() s.index = mi return s col_series = df.columns.to_series() expected = {'index': df.index, 'columns': col_series, 'spam': to_series(df.index, 'spam'), 'eggs': to_series(df.index, 'eggs'), 'C0': col_series} for k, v in resolvers.items(): if isinstance(v, Index): assert v.is_(expected[k]) elif isinstance(v, Series): assert_series_equal(v, expected[k]) else: raise AssertionError("object must be a Series or Index") def test_raise_on_panel_with_multiindex(self, parser, engine): tm.skip_if_no_ne() p = tm.makePanel(7) p.items = tm.makeCustomIndex(len(p.items), nlevels=2) with pytest.raises(NotImplementedError): pd.eval('p + 1', parser=parser, engine=engine) def test_raise_on_panel4d_with_multiindex(self, parser, engine): tm.skip_if_no_ne() p4d = tm.makePanel4D(7) p4d.items = tm.makeCustomIndex(len(p4d.items), nlevels=2) with pytest.raises(NotImplementedError): pd.eval('p4d + 1', parser=parser, engine=engine) class TestDataFrameQueryNumExprPandas(object): @classmethod def setup_class(cls): cls.engine = 'numexpr' cls.parser = 'pandas' tm.skip_if_no_ne(cls.engine) @classmethod def teardown_class(cls): del cls.engine, cls.parser def test_date_query_with_attribute_access(self): engine, parser = self.engine, self.parser skip_if_no_pandas_parser(parser) df = DataFrame(randn(5, 3)) df['dates1'] = date_range('1/1/2012', periods=5) df['dates2'] = date_range('1/1/2013', periods=5) 
df['dates3'] = date_range('1/1/2014', periods=5) res = df.query('@df.dates1 < 20130101 < @df.dates3', engine=engine, parser=parser) expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)] assert_frame_equal(res, expec) def test_date_query_no_attribute_access(self): engine, parser = self.engine, self.parser df = DataFrame(randn(5, 3)) df['dates1'] = date_range('1/1/2012', periods=5) df['dates2'] = date_range('1/1/2013', periods=5) df['dates3'] = date_range('1/1/2014', periods=5) res = df.query('dates1 < 20130101 < dates3', engine=engine, parser=parser) expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)] assert_frame_equal(res, expec) def test_date_query_with_NaT(self): engine, parser = self.engine, self.parser n = 10 df = DataFrame(randn(n, 3)) df['dates1'] = date_range('1/1/2012', periods=n) df['dates2'] = date_range('1/1/2013', periods=n) df['dates3'] = date_range('1/1/2014', periods=n) df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT res = df.query('dates1 < 20130101 < dates3', engine=engine, parser=parser) expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)] assert_frame_equal(res, expec) def test_date_index_query(self): engine, parser = self.engine, self.parser n = 10 df = DataFrame(randn(n, 3)) df['dates1'] = date_range('1/1/2012', periods=n) df['dates3'] = date_range('1/1/2014', periods=n) df.set_index('dates1', inplace=True, drop=True) res = df.query('index < 20130101 < dates3', engine=engine, parser=parser) expec = df[(df.index < '20130101') & ('20130101' < df.dates3)] assert_frame_equal(res, expec) def test_date_index_query_with_NaT(self): engine, parser = self.engine, self.parser n = 10 df = DataFrame(randn(n, 3)) df['dates1'] = date_range('1/1/2012', periods=n) df['dates3'] = date_range('1/1/2014', periods=n) df.iloc[0, 0] = pd.NaT df.set_index('dates1', inplace=True, drop=True) res = df.query('index < 20130101 < dates3', engine=engine, parser=parser) expec = df[(df.index 
< '20130101') & ('20130101' < df.dates3)] assert_frame_equal(res, expec) def test_date_index_query_with_NaT_duplicates(self): engine, parser = self.engine, self.parser n = 10 d = {} d['dates1'] = date_range('1/1/2012', periods=n) d['dates3'] = date_range('1/1/2014', periods=n) df = DataFrame(d) df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT df.set_index('dates1', inplace=True, drop=True) res = df.query('dates1 < 20130101 < dates3', engine=engine, parser=parser) expec = df[(df.index.to_series() < '20130101') & ('20130101' < df.dates3)] assert_frame_equal(res, expec) def test_date_query_with_non_date(self): engine, parser = self.engine, self.parser n = 10 df = DataFrame({'dates': date_range('1/1/2012', periods=n), 'nondate': np.arange(n)}) ops = '==', '!=', '<', '>', '<=', '>=' for op in ops: with pytest.raises(TypeError): df.query('dates %s nondate' % op, parser=parser, engine=engine) def test_query_syntax_error(self): engine, parser = self.engine, self.parser df = DataFrame({"i": lrange(10), "+": lrange(3, 13), "r": lrange(4, 14)}) with pytest.raises(SyntaxError): df.query('i - +', engine=engine, parser=parser) def test_query_scope(self): from pandas.core.computation.ops import UndefinedVariableError engine, parser = self.engine, self.parser skip_if_no_pandas_parser(parser) df = DataFrame(np.random.randn(20, 2), columns=list('ab')) a, b = 1, 2 # noqa res = df.query('a > b', engine=engine, parser=parser) expected = df[df.a > df.b] assert_frame_equal(res, expected) res = df.query('@a > b', engine=engine, parser=parser) expected = df[a > df.b] assert_frame_equal(res, expected) # no local variable c with pytest.raises(UndefinedVariableError): df.query('@a > b > @c', engine=engine, parser=parser) # no column named 'c' with pytest.raises(UndefinedVariableError): df.query('@a > b > c', engine=engine, parser=parser) def test_query_doesnt_pickup_local(self): from pandas.core.computation.ops import UndefinedVariableError engine, parser = self.engine, self.parser n = m = 
# NOTE(review): this chunk begins mid-method. The bare `10` below is the tail
# of an assignment (presumably `n = m = 10` — confirm against the preceding
# chunk), and the following statements are the tail of a test method of the
# enclosing class (presumably TestDataFrameQueryNumExprPandas, given the
# subclasses below inherit from it — TODO confirm).
10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))

# we don't pick up the local 'sin'
with pytest.raises(UndefinedVariableError):
    df.query('sin > 5', engine=engine, parser=parser)

    def test_query_builtin(self):
        # A column whose name shadows a numexpr builtin ('sin') must be
        # rejected by the numexpr engine.
        from pandas.core.computation.engines import NumExprClobberingError
        engine, parser = self.engine, self.parser

        n = m = 10
        df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))

        df.index.name = 'sin'
        with tm.assert_raises_regex(NumExprClobberingError,
                                    'Variables in expression.+'):
            df.query('sin > 5', engine=engine, parser=parser)

    def test_query(self):
        # Basic column-vs-column comparisons and arithmetic in query().
        engine, parser = self.engine, self.parser
        df = DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c'])

        assert_frame_equal(df.query('a < b', engine=engine, parser=parser),
                           df[df.a < df.b])
        assert_frame_equal(df.query('a + b > b * c', engine=engine,
                                    parser=parser),
                           df[df.a + df.b > df.b * df.c])

    def test_query_index_with_name(self):
        # A named index ('blob') is resolvable by name inside query().
        engine, parser = self.engine, self.parser
        df = DataFrame(np.random.randint(10, size=(10, 3)),
                       index=Index(range(10), name='blob'),
                       columns=['a', 'b', 'c'])
        res = df.query('(blob < 5) & (a < b)', engine=engine, parser=parser)
        expec = df[(df.index < 5) & (df.a < df.b)]
        assert_frame_equal(res, expec)

        res = df.query('blob < b', engine=engine, parser=parser)
        expec = df[df.index < df.b]
        assert_frame_equal(res, expec)

    def test_query_index_without_name(self):
        engine, parser = self.engine, self.parser
        df = DataFrame(np.random.randint(10, size=(10, 3)),
                       index=range(10), columns=['a', 'b', 'c'])

        # "index" should refer to the index
        res = df.query('index < b', engine=engine, parser=parser)
        expec = df[df.index < df.b]
        assert_frame_equal(res, expec)

        # test against a scalar
        res = df.query('index < 5', engine=engine, parser=parser)
        expec = df[df.index < 5]
        assert_frame_equal(res, expec)

    def test_nested_scope(self):
        # '@' local-variable resolution inside query()/eval() expressions
        # (pandas parser only).
        engine = self.engine
        parser = self.parser
        skip_if_no_pandas_parser(parser)

        df = DataFrame(np.random.randn(5, 3))
        df2 = DataFrame(np.random.randn(5, 3))
        expected = df[(df > 0) & (df2 > 0)]

        result = df.query('(@df > 0) & (@df2 > 0)', engine=engine,
                          parser=parser)
        assert_frame_equal(result, expected)

        result = pd.eval('df[df > 0 and df2 > 0]', engine=engine,
                         parser=parser)
        assert_frame_equal(result, expected)

        result = pd.eval('df[df > 0 and df2 > 0 and df[df > 0] > 0]',
                         engine=engine, parser=parser)
        expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
        assert_frame_equal(result, expected)

        result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser)
        expected = df.query('(@df>0) & (@df2>0)', engine=engine,
                            parser=parser)
        assert_frame_equal(result, expected)

    def test_nested_raises_on_local_self_reference(self):
        from pandas.core.computation.ops import UndefinedVariableError

        df = DataFrame(np.random.randn(5, 3))

        # can't reference ourself b/c we're a local so @ is necessary
        with pytest.raises(UndefinedVariableError):
            df.query('df > 0', engine=self.engine, parser=self.parser)

    def test_local_syntax(self):
        # '@b' binds the enclosing local; bare 'b' binds the column.
        skip_if_no_pandas_parser(self.parser)
        engine, parser = self.engine, self.parser
        df = DataFrame(randn(100, 10), columns=list('abcdefghij'))
        b = 1
        expect = df[df.a < b]
        result = df.query('a < @b', engine=engine, parser=parser)
        assert_frame_equal(result, expect)

        expect = df[df.a < df.b]
        result = df.query('a < b', engine=engine, parser=parser)
        assert_frame_equal(result, expect)

    def test_chained_cmp_and_in(self):
        skip_if_no_pandas_parser(self.parser)
        engine, parser = self.engine, self.parser
        cols = list('abc')
        df = DataFrame(randn(100, len(cols)), columns=cols)
        res = df.query('a < b < c and a not in b not in c', engine=engine,
                       parser=parser)
        ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)  # noqa
        expec = df[ind]
        assert_frame_equal(res, expec)

    def test_local_variable_with_in(self):
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        a = Series(np.random.randint(3, size=15), name='a')
        b = Series(np.random.randint(10, size=15), name='b')
        df = DataFrame({'a': a, 'b': b})

        expected = df.loc[(df.b - 1).isin(a)]
        result = df.query('b - 1 in a', engine=engine, parser=parser)
        assert_frame_equal(expected, result)

        # shadow column 'b' with a local and force it with '@'
        b = Series(np.random.randint(10, size=15), name='b')
        expected = df.loc[(b - 1).isin(a)]
        result = df.query('@b - 1 in a', engine=engine, parser=parser)
        assert_frame_equal(expected, result)

    def test_at_inside_string(self):
        # a literal '@' inside a quoted string must NOT trigger local lookup
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        c = 1  # noqa
        df = DataFrame({'a': ['a', 'a', 'b', 'b', '@c', '@c']})
        result = df.query('a == "@c"', engine=engine, parser=parser)
        expected = df[df.a == "@c"]
        assert_frame_equal(result, expected)

    def test_query_undefined_local(self):
        from pandas.core.computation.ops import UndefinedVariableError
        engine, parser = self.engine, self.parser
        skip_if_no_pandas_parser(parser)
        df = DataFrame(np.random.rand(10, 2), columns=list('ab'))
        with tm.assert_raises_regex(UndefinedVariableError,
                                    "local variable 'c' is not defined"):
            df.query('a == @c', engine=engine, parser=parser)

    def test_index_resolvers_come_after_columns_with_the_same_name(self):
        # When a column and the index share a name, the column wins;
        # 'ilevel_0' always refers to the index.
        n = 1  # noqa
        a = np.r_[20:101:20]

        df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
        df.index.name = 'index'
        result = df.query('index > 5', engine=self.engine, parser=self.parser)
        expected = df[df['index'] > 5]
        assert_frame_equal(result, expected)

        df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
        result = df.query('ilevel_0 > 5', engine=self.engine,
                          parser=self.parser)
        expected = df.loc[df.index[df.index > 5]]
        assert_frame_equal(result, expected)

        df = DataFrame({'a': a, 'b': np.random.randn(a.size)})
        df.index.name = 'a'
        result = df.query('a > 5', engine=self.engine, parser=self.parser)
        expected = df[df.a > 5]
        assert_frame_equal(result, expected)

        result = df.query('index > 5', engine=self.engine, parser=self.parser)
        expected = df.loc[df.index[df.index > 5]]
        assert_frame_equal(result, expected)

    def test_inf(self):
        # comparisons against inf must survive query() round trip
        n = 10
        df = DataFrame({'a': np.random.rand(n), 'b': np.random.rand(n)})
        df.loc[::2, 0] = np.inf
        ops = '==', '!='
        d = dict(zip(ops, (operator.eq, operator.ne)))
        for op, f in d.items():
            q = 'a %s inf' % op
            expected = df[f(df.a, np.inf)]
            result = df.query(q, engine=self.engine, parser=self.parser)
            assert_frame_equal(result, expected)


class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
    # Same test battery with engine='numexpr' but parser='python':
    # '@'-locals and chained comparisons are NOT available here.

    @classmethod
    def setup_class(cls):
        super(TestDataFrameQueryNumExprPython, cls).setup_class()
        cls.engine = 'numexpr'
        cls.parser = 'python'
        tm.skip_if_no_ne(cls.engine)
        cls.frame = TestData().frame

    def test_date_query_no_attribute_access(self):
        engine, parser = self.engine, self.parser
        df = DataFrame(randn(5, 3))
        df['dates1'] = date_range('1/1/2012', periods=5)
        df['dates2'] = date_range('1/1/2013', periods=5)
        df['dates3'] = date_range('1/1/2014', periods=5)
        res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
                       engine=engine, parser=parser)
        expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)

    def test_date_query_with_NaT(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates2'] = date_range('1/1/2013', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
        df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
        res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
                       engine=engine, parser=parser)
        expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)

    def test_date_index_query(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.set_index('dates1', inplace=True, drop=True)
        res = df.query('(index < 20130101) & (20130101 < dates3)',
                       engine=engine, parser=parser)
        expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)

    def test_date_index_query_with_NaT(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.iloc[0, 0] = pd.NaT
        df.set_index('dates1', inplace=True, drop=True)
        res = df.query('(index < 20130101) & (20130101 < dates3)',
                       engine=engine, parser=parser)
        expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
        assert_frame_equal(res, expec)

    def test_date_index_query_with_NaT_duplicates(self):
        engine, parser = self.engine, self.parser
        n = 10
        df = DataFrame(randn(n, 3))
        df['dates1'] = date_range('1/1/2012', periods=n)
        df['dates3'] = date_range('1/1/2014', periods=n)
        df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
        df.set_index('dates1', inplace=True, drop=True)
        # chained comparison is unsupported by the python parser
        with pytest.raises(NotImplementedError):
            df.query('index < 20130101 < dates3', engine=engine,
                     parser=parser)

    def test_nested_scope(self):
        # Overrides the pandas-parser version: '@' is a SyntaxError here and
        # frames are not resolvable by bare name inside query().
        from pandas.core.computation.ops import UndefinedVariableError
        engine = self.engine
        parser = self.parser
        # smoke test
        x = 1  # noqa
        result = pd.eval('x + 1', engine=engine, parser=parser)
        assert result == 2

        df = DataFrame(np.random.randn(5, 3))
        df2 = DataFrame(np.random.randn(5, 3))

        # don't have the pandas parser
        with pytest.raises(SyntaxError):
            df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)

        with pytest.raises(UndefinedVariableError):
            df.query('(df>0) & (df2>0)', engine=engine, parser=parser)

        expected = df[(df > 0) & (df2 > 0)]
        result = pd.eval('df[(df > 0) & (df2 > 0)]', engine=engine,
                         parser=parser)
        assert_frame_equal(expected, result)

        expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
        result = pd.eval('df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]',
                         engine=engine, parser=parser)
        assert_frame_equal(expected, result)


class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):
    # engine='python' + parser='pandas'

    @classmethod
    def setup_class(cls):
        super(TestDataFrameQueryPythonPandas, cls).setup_class()
        cls.engine = 'python'
        cls.parser = 'pandas'
        cls.frame = TestData().frame

    def test_query_builtin(self):
        # Overrides the numexpr version: the python engine has no builtin
        # namespace to clobber, so 'sin' resolves to the index normally.
        engine, parser = self.engine, self.parser

        n = m = 10
        df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))

        df.index.name = 'sin'
        expected = df[df.index > 5]
        result = df.query('sin > 5', engine=engine, parser=parser)
        assert_frame_equal(expected, result)


class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):
    # engine='python' + parser='python'

    @classmethod
    def setup_class(cls):
        super(TestDataFrameQueryPythonPython, cls).setup_class()
        cls.engine = cls.parser = 'python'
        cls.frame = TestData().frame

    def test_query_builtin(self):
        # Same relaxation as TestDataFrameQueryPythonPandas.test_query_builtin.
        engine, parser = self.engine, self.parser

        n = m = 10
        df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))

        df.index.name = 'sin'
        expected = df[df.index > 5]
        result = df.query('sin > 5', engine=engine, parser=parser)
        assert_frame_equal(expected, result)


class TestDataFrameQueryStrings(object):
    # String semantics in query(); parser/engine arrive as pytest fixtures.

    def test_str_query_method(self, parser, engine):
        tm.skip_if_no_ne(engine)
        df = DataFrame(randn(10, 1), columns=['b'])
        df['strings'] = Series(list('aabbccddee'))
        expect = df[df.strings == 'a']

        if parser != 'pandas':
            col = 'strings'
            lst = '"a"'

            lhs = [col] * 2 + [lst] * 2
            rhs = lhs[::-1]

            eq, ne = '==', '!='
            ops = 2 * ([eq] + [ne])

            for lhs, op, rhs in zip(lhs, ops, rhs):
                ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
                pytest.raises(NotImplementedError, df.query, ex,
                              engine=engine, parser=parser,
                              local_dict={'strings': df.strings})
        else:
            res = df.query('"a" == strings', engine=engine, parser=parser)
            assert_frame_equal(res, expect)

            res = df.query('strings == "a"', engine=engine, parser=parser)
            assert_frame_equal(res, expect)
            assert_frame_equal(res, df[df.strings.isin(['a'])])

            expect = df[df.strings != 'a']
            res = df.query('strings != "a"', engine=engine, parser=parser)
            assert_frame_equal(res, expect)

            res = df.query('"a" != strings', engine=engine, parser=parser)
            assert_frame_equal(res, expect)
            assert_frame_equal(res, df[~df.strings.isin(['a'])])

    def test_str_list_query_method(self, parser, engine):
        tm.skip_if_no_ne(engine)
        df = DataFrame(randn(10, 1), columns=['b'])
        df['strings'] = Series(list('aabbccddee'))
        expect = df[df.strings.isin(['a', 'b'])]

        if parser != 'pandas':
            col = 'strings'
            lst = '["a", "b"]'

            lhs = [col] * 2 + [lst] * 2
            rhs = lhs[::-1]

            eq, ne = '==', '!='
            ops = 2 * ([eq] + [ne])

            for lhs, op, rhs in zip(lhs, ops, rhs):
                ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
                with pytest.raises(NotImplementedError):
                    df.query(ex, engine=engine, parser=parser)
        else:
            res = df.query('strings == ["a", "b"]', engine=engine,
                           parser=parser)
            assert_frame_equal(res, expect)

            res = df.query('["a", "b"] == strings', engine=engine,
                           parser=parser)
            assert_frame_equal(res, expect)

            expect = df[~df.strings.isin(['a', 'b'])]

            res = df.query('strings != ["a", "b"]', engine=engine,
                           parser=parser)
            assert_frame_equal(res, expect)

            res = df.query('["a", "b"] != strings', engine=engine,
                           parser=parser)
            assert_frame_equal(res, expect)

    def test_query_with_string_columns(self, parser, engine):
        tm.skip_if_no_ne(engine)
        df = DataFrame({'a': list('aaaabbbbcccc'),
                        'b': list('aabbccddeeff'),
                        'c': np.random.randint(5, size=12),
                        'd': np.random.randint(9, size=12)})
        if parser == 'pandas':
            res = df.query('a in b', parser=parser, engine=engine)
            expec = df[df.a.isin(df.b)]
            assert_frame_equal(res, expec)

            res = df.query('a in b and c < d', parser=parser, engine=engine)
            expec = df[df.a.isin(df.b) & (df.c < df.d)]
            assert_frame_equal(res, expec)
        else:
            with pytest.raises(NotImplementedError):
                df.query('a in b', parser=parser, engine=engine)

            with pytest.raises(NotImplementedError):
                df.query('a in b and c < d', parser=parser, engine=engine)

    def test_object_array_eq_ne(self, parser, engine):
        tm.skip_if_no_ne(engine)
        df = DataFrame({'a': list('aaaabbbbcccc'),
                        'b': list('aabbccddeeff'),
                        'c': np.random.randint(5, size=12),
                        'd': np.random.randint(9, size=12)})
        res = df.query('a == b', parser=parser, engine=engine)
        exp = df[df.a == df.b]
        assert_frame_equal(res, exp)

        res = df.query('a != b', parser=parser, engine=engine)
        exp = df[df.a != df.b]
        assert_frame_equal(res, exp)

    def test_query_with_nested_strings(self, parser, engine):
        tm.skip_if_no_ne(engine)
        skip_if_no_pandas_parser(parser)
        # NOTE(review): the exact whitespace of this literal was destroyed by
        # the chunk collapsing; reconstructed so that sep=r'\s{2,}' still
        # splits the three columns — confirm against upstream.
        raw = """id          event          timestamp
        1   "page 1 load"   1/1/2014 0:00:01
        1   "page 1 exit"   1/1/2014 0:00:31
        2   "page 2 load"   1/1/2014 0:01:01
        2   "page 2 exit"   1/1/2014 0:01:31
        3   "page 3 load"   1/1/2014 0:02:01
        3   "page 3 exit"   1/1/2014 0:02:31
        4   "page 1 load"   2/1/2014 1:00:01
        4   "page 1 exit"   2/1/2014 1:00:31
        5   "page 2 load"   2/1/2014 1:01:01
        5   "page 2 exit"   2/1/2014 1:01:31
        6   "page 3 load"   2/1/2014 1:02:01
        6   "page 3 exit"   2/1/2014 1:02:31
        """
        df = pd.read_csv(StringIO(raw), sep=r'\s{2,}',
                         engine='python', parse_dates=['timestamp'])
        expected = df[df.event == '"page 1 load"']
        res = df.query("""'"page 1 load"' in event""", parser=parser,
                       engine=engine)
        assert_frame_equal(expected, res)

    def test_query_with_nested_special_character(self, parser, engine):
        skip_if_no_pandas_parser(parser)
        tm.skip_if_no_ne(engine)
        df = DataFrame({'a': ['a', 'b', 'test & test'],
                        'b': [1, 2, 3]})
        res = df.query('a == "test & test"', parser=parser, engine=engine)
        expec = df[df.a == 'test & test']
        assert_frame_equal(res, expec)

    def test_query_lex_compare_strings(self, parser, engine):
        # lexicographic <, >, <=, >= on string columns
        tm.skip_if_no_ne(engine=engine)
        import operator as opr

        a = Series(np.random.choice(list('abcde'), 20))
        b = Series(np.arange(a.size))
        df = DataFrame({'X': a, 'Y': b})

        ops = {'<': opr.lt, '>': opr.gt, '<=': opr.le, '>=': opr.ge}

        for op, func in ops.items():
            res = df.query('X %s "d"' % op, engine=engine, parser=parser)
            expected = df[func(df.X, 'd')]
            assert_frame_equal(res, expected)

    def test_query_single_element_booleans(self, parser, engine):
        tm.skip_if_no_ne(engine)
        columns = 'bid', 'bidsize', 'ask', 'asksize'
        data = np.random.randint(2, size=(1, len(columns))).astype(bool)
        df = DataFrame(data, columns=columns)
        res = df.query('bid & ask', engine=engine, parser=parser)
        expected = df[df.bid & df.ask]
        assert_frame_equal(res, expected)

    def test_query_string_scalar_variable(self, parser, engine):
        tm.skip_if_no_ne(engine)
        skip_if_no_pandas_parser(parser)
        df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'],
                           'Price': [109.70, 109.72, 183.30, 183.35]})
        e = df[df.Symbol == 'BUD US']
        symb = 'BUD US'  # noqa
        r = df.query('Symbol == @symb', parser=parser, engine=engine)
        assert_frame_equal(e, r)


class TestDataFrameEvalNumExprPandas(object):
    # DataFrame.eval() battery; subclasses below only swap engine/parser.

    @classmethod
    def setup_class(cls):
        cls.engine = 'numexpr'
        cls.parser = 'pandas'
        tm.skip_if_no_ne()

    def setup_method(self, method):
        self.frame = DataFrame(randn(10, 3), columns=list('abc'))

    def teardown_method(self, method):
        del self.frame

    def test_simple_expr(self):
        res = self.frame.eval('a + b', engine=self.engine, parser=self.parser)
        expect = self.frame.a + self.frame.b
        assert_series_equal(res, expect)

    def test_bool_arith_expr(self):
        res = self.frame.eval('a[a < 1] + b', engine=self.engine,
                              parser=self.parser)
        expect = self.frame.a[self.frame.a < 1] + self.frame.b
        assert_series_equal(res, expect)

    def test_invalid_type_for_operator_raises(self):
        df = DataFrame({'a': [1, 2], 'b': ['c', 'd']})
        ops = '+', '-', '*', '/'
        for op in ops:
            with tm.assert_raises_regex(TypeError,
                                        "unsupported operand type\(s\) "
                                        "for .+: '.+' and '.+'"):
                df.eval('a {0} b'.format(op), engine=self.engine,
                        parser=self.parser)


class TestDataFrameEvalNumExprPython(TestDataFrameEvalNumExprPandas):

    @classmethod
    def setup_class(cls):
        super(TestDataFrameEvalNumExprPython, cls).setup_class()
        cls.engine = 'numexpr'
        cls.parser = 'python'
        tm.skip_if_no_ne(cls.engine)


class TestDataFrameEvalPythonPandas(TestDataFrameEvalNumExprPandas):

    @classmethod
    def setup_class(cls):
        super(TestDataFrameEvalPythonPandas, cls).setup_class()
        cls.engine = 'python'
        cls.parser = 'pandas'


class TestDataFrameEvalPythonPython(TestDataFrameEvalNumExprPython):

    @classmethod
    def setup_class(cls):
        cls.engine = cls.parser = 'python'
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module contains functions and methods that relate to the DataInfo
class which provides a container for informational attributes as well as
summary info methods.

A DataInfo object is attached to the Quantity, SkyCoord, and Time classes in
astropy.  Here it allows those classes to be used in Tables and uniformly
carry table column attributes such as name, format, dtype, meta, and
description.
"""

# Note: these functions and classes are tested extensively in astropy table
# tests via their use in providing mixin column info, and in
# astropy/tests/test_info for providing table and column info summary data.

from __future__ import absolute_import, division, print_function

import os
import sys
import weakref
from copy import deepcopy
import numpy as np
from functools import partial
import warnings
import re
from collections import OrderedDict

from ..extern import six
from ..extern.six.moves import zip, cStringIO as StringIO

from . import metadata

__all__ = ['data_info_factory', 'dtype_info_name', 'BaseColumnInfo',
           'DataInfo', 'MixinInfo', 'ParentDtypeInfo']

# Tuple of filterwarnings kwargs to ignore when calling info
IGNORE_WARNINGS = (dict(category=RuntimeWarning, message='All-NaN|'
                        'Mean of empty slice|Degrees of freedom <= 0'),)

# (is_py3, dtype.kind) -> human-readable type name used by dtype_info_name
STRING_TYPE_NAMES = {(False, 'S'): 'str',  # not PY3
                     (False, 'U'): 'unicode',
                     (True, 'S'): 'bytes',  # PY3
                     (True, 'U'): 'str'}


def dtype_info_name(dtype):
    """Return a human-oriented string name of the ``dtype`` arg.

    This can be use by astropy methods that present type information
    about a data object.

    The output is mostly equivalent to ``dtype.name`` which takes the form
    <type_name>[B] where <type_name> is like ``int`` or ``bool`` and [B] is
    an optional number of bits which gets included only for numeric types.

    For bytes, string and unicode types, the output is shown below, where
    <N> is the number of characters.  This representation corresponds to
    the Python type that matches the dtype::

      Numpy          S<N>      U<N>
      Python 2      str<N>  unicode<N>
      Python 3    bytes<N>   str<N>

    Parameters
    ----------
    dtype : str, np.dtype, type
        Input dtype as an object that can be converted via np.dtype()

    Returns
    -------
    dtype_info_name : str
        String name of ``dtype``
    """
    dtype = np.dtype(dtype)
    if dtype.kind in ('S', 'U'):
        # e.g. '<U12' -> length '12'; map kind to the matching Python type
        length = re.search(r'(\d+)', dtype.str).group(1)
        type_name = STRING_TYPE_NAMES[(not six.PY2, dtype.kind)]
        out = type_name + length
    else:
        out = dtype.name

    return out


def data_info_factory(names, funcs):
    """
    Factory to create a function that can be used as an ``option``
    for outputting data object summary information.

    Examples
    --------
    >>> from astropy.utils.data_info import data_info_factory
    >>> from astropy.table import Column
    >>> c = Column([4., 3., 2., 1.])
    >>> mystats = data_info_factory(names=['min', 'median', 'max'],
    ...                             funcs=[np.min, np.median, np.max])
    >>> c.info(option=mystats)
    min = 1.0
    median = 2.5
    max = 4.0
    n_bad = 0
    length = 4

    Parameters
    ----------
    names : list
        List of information attribute names
    funcs : list
        List of functions that compute the corresponding information
        attribute

    Returns
    -------
    func : function
        Function that can be used as a data info option
    """
    def func(dat):
        outs = []
        for name, func in zip(names, funcs):
            try:
                # a string func is taken as a method name on the data object
                if isinstance(func, six.string_types):
                    out = getattr(dat, func)()
                else:
                    out = func(dat)
            except Exception:
                # any failure renders as '--' rather than aborting the summary
                outs.append('--')
            else:
                outs.append(str(out))

        return OrderedDict(zip(names, outs))
    return func


def _get_obj_attrs_map(obj, attrs):
    """
    Get the values for object ``attrs`` and return as a dict.  This
    ignores any attributes that are None and in Py2 converts any unicode
    attribute names or values to str.  In the context of serializing the
    supported core astropy classes this conversion will succeed and
    results in more succinct and less python-specific YAML.
    """
    out = {}
    for attr in attrs:
        val = getattr(obj, attr, None)

        if val is not None:
            if six.PY2:
                attr = str(attr)
                if isinstance(val, six.text_type):
                    val = str(val)
            out[attr] = val
    return out


def _get_data_attribute(dat, attr=None):
    """
    Get a data object attribute for the ``attributes`` info summary method
    """
    if attr == 'class':
        val = type(dat).__name__
    elif attr == 'dtype':
        val = dtype_info_name(dat.info.dtype)
    elif attr == 'shape':
        # drop the length axis; scalar-per-row data renders as ''
        datshape = dat.shape[1:]
        val = datshape if datshape else ''
    else:
        val = getattr(dat.info, attr)
    if val is None:
        val = ''
    return str(val)


class DataInfo(object):
    """
    Descriptor that data classes use to add an ``info`` attribute for
    storing data attributes in a uniform and portable way.  Note that it
    *must* be called ``info`` so that the DataInfo() object can be stored
    in the ``instance`` using the ``info`` key.  Because owner_cls.x is a
    descriptor, Python doesn't use __dict__['x'] normally, and the
    descriptor can safely store stuff there.  Thanks to
    http://nbviewer.ipython.org/urls/
    gist.github.com/ChrisBeaumont/5758381/raw/descriptor_writeup.ipynb
    for this trick that works for non-hashable classes.

    Parameters
    ----------
    bound : bool
        If True this is a descriptor attribute in a class definition, else
        it is a DataInfo() object that is bound to a data object instance.
        Default is False.
    """
    _stats = ['mean', 'std', 'min', 'max']
    # attributes delegated to the parent data object (subclasses override)
    attrs_from_parent = set()
    attr_names = set(['name', 'unit', 'dtype', 'format', 'description',
                      'meta'])
    # attributes excluded when copying info between objects
    _attrs_no_copy = set()
    _info_summary_attrs = ('dtype', 'shape', 'unit', 'format', 'description',
                           'class')
    _represent_as_dict_attrs = ()
    _parent = None

    def __init__(self, bound=False):
        # If bound to a data object instance then create the dict of
        # attributes which stores the info attribute values.
        if bound:
            self._attrs = dict((attr, None) for attr in self.attr_names)

    def __get__(self, instance, owner_cls):
        if instance is None:
            # This is an unbound descriptor on the class
            info = self
            info._parent_cls = owner_cls
        else:
            # Lazily create a bound info object per data-object instance,
            # stored in the instance __dict__ (see class docstring).
            info = instance.__dict__.get('info')
            if info is None:
                info = instance.__dict__['info'] = self.__class__(bound=True)
            info._parent = instance
        return info

    def __set__(self, instance, value):
        if instance is None:
            # This is an unbound descriptor on the class
            raise ValueError('cannot set unbound descriptor')

        if isinstance(value, DataInfo):
            info = instance.__dict__['info'] = self.__class__(bound=True)
            # copy only attrs owned by info (not parent-derived or no-copy)
            for attr in (info.attr_names - info.attrs_from_parent -
                         info._attrs_no_copy):
                info._attrs[attr] = deepcopy(getattr(value, attr))
        else:
            raise TypeError('info must be set with a DataInfo instance')

    def __getstate__(self):
        return self._attrs

    def __setstate__(self, state):
        self._attrs = state

    def __getattr__(self, attr):
        if attr.startswith('_'):
            return super(DataInfo, self).__getattribute__(attr)

        if attr in self.attrs_from_parent:
            return getattr(self._parent, attr)

        try:
            value = self._attrs[attr]
        except KeyError:
            # deliberately no `return` here: this call raises AttributeError
            super(DataInfo, self).__getattribute__(attr)  # Generate AttributeError

        # Weak ref for parent table
        if attr == 'parent_table' and callable(value):
            value = value()

        # Mixins have a default dtype of Object if nothing else was set
        if attr == 'dtype' and value is None:
            value = np.dtype('O')

        return value

    def __setattr__(self, attr, value):
        propobj = getattr(self.__class__, attr, None)

        # If attribute is taken from parent properties and there is not a
        # class property (getter/setter) for this attribute then set
        # attribute directly in parent.
        if attr in self.attrs_from_parent and not isinstance(propobj,
                                                             property):
            setattr(self._parent, attr, value)
            return

        # Check if there is a property setter and use it if possible.
        if isinstance(propobj, property):
            if propobj.fset is None:
                raise AttributeError("can't set attribute")
            propobj.fset(self, value)
            return

        # Private attr names get directly set
        if attr.startswith('_'):
            super(DataInfo, self).__setattr__(attr, value)
            return

        # Finally this must be an actual data attribute that this class is
        # handling.
        if attr not in self.attr_names:
            raise AttributeError("attribute must be one of {0}"
                                 .format(self.attr_names))

        # store parent_table as a weakref to avoid a reference cycle
        if attr == 'parent_table':
            value = None if value is None else weakref.ref(value)

        self._attrs[attr] = value

    def _represent_as_dict(self):
        """
        Get the values for the parent ``attrs`` and return as a dict.
        This is typically used for serializing the parent.
        """
        return _get_obj_attrs_map(self._parent,
                                  self._represent_as_dict_attrs)

    def _construct_from_dict(self, map):
        return self._parent_cls(**map)

    info_summary_attributes = staticmethod(
        data_info_factory(names=_info_summary_attrs,
                          funcs=[partial(_get_data_attribute, attr=attr)
                                 for attr in _info_summary_attrs]))

    # No nan* methods in numpy < 1.8
    info_summary_stats = staticmethod(
        data_info_factory(names=_stats,
                          funcs=[getattr(np, 'nan' + stat)
                                 for stat in _stats]))

    def __call__(self, option='attributes', out=''):
        """
        Write summary information about data object to the ``out``
        filehandle.  By default this prints to standard output via
        sys.stdout.

        The ``option`` argument specifies what type of information
        to include.  This can be a string, a function, or a list of
        strings or functions.  Built-in options are:

        - ``attributes``: data object attributes like ``dtype`` and
          ``format``
        - ``stats``: basic statistics: min, mean, and max

        If a function is specified then that function will be called with
        the data object as its single argument.  The function must return
        an OrderedDict containing the information attributes.

        If a list is provided then the information attributes will be
        appended for each of the options, in order.

        Examples
        --------
        >>> from astropy.table import Column
        >>> c = Column([1, 2], unit='m', dtype='int32')
        >>> c.info()
        dtype = int32
        unit = m
        class = Column
        n_bad = 0
        length = 2

        >>> c.info(['attributes', 'stats'])
        dtype = int32
        unit = m
        class = Column
        mean = 1.5
        std = 0.5
        min = 1
        max = 2
        n_bad = 0
        length = 2

        Parameters
        ----------
        option : str, function, list of (str or function)
            Info option, defaults to 'attributes'.
        out : file-like object, None
            Output destination, defaults to sys.stdout.  If None then the
            OrderedDict with information attributes is returned

        Returns
        -------
        info : OrderedDict if out==None else None
        """
        if out == '':
            out = sys.stdout

        dat = self._parent
        info = OrderedDict()
        name = dat.info.name
        if name is not None:
            info['name'] = name

        options = option if isinstance(option, (list, tuple)) else [option]
        for option in options:
            if isinstance(option, six.string_types):
                # map e.g. 'stats' -> self.info_summary_stats
                if hasattr(self, 'info_summary_' + option):
                    option = getattr(self, 'info_summary_' + option)
                else:
                    raise ValueError('option={0} is not an allowed '
                                     'information type'.format(option))

            with warnings.catch_warnings():
                for ignore_kwargs in IGNORE_WARNINGS:
                    warnings.filterwarnings('ignore', **ignore_kwargs)
                info.update(option(dat))

        if hasattr(dat, 'mask'):
            n_bad = np.count_nonzero(dat.mask)
        else:
            try:
                n_bad = np.count_nonzero(np.isinf(dat) | np.isnan(dat))
            except Exception:
                # non-numeric data: report no bad values
                n_bad = 0
        info['n_bad'] = n_bad

        try:
            info['length'] = len(dat)
        except TypeError:
            # scalar parent has no length
            pass

        if out is None:
            return info

        for key, val in info.items():
            if val != '':
                out.write('{0} = {1}'.format(key, val) + os.linesep)

    def __repr__(self):
        if self._parent is None:
            return super(DataInfo, self).__repr__()

        out = StringIO()
        self.__call__(out=out)
        return out.getvalue()


class BaseColumnInfo(DataInfo):
    """
    Base info class for anything that can be a column in an astropy Table.
    There are at least two classes that inherit from this:

      ColumnInfo: for native astropy Column / MaskedColumn objects
      MixinInfo: for mixin column objects

    Note that this class is defined here so that mixins can use it
    without importing the table package.
    """
    attr_names = DataInfo.attr_names.union(['parent_table', 'indices'])
    _attrs_no_copy = set(['parent_table'])

    def iter_str_vals(self):
        """
        This is a mixin-safe version of Column.iter_str_vals.
        """
        col = self._parent
        if self.parent_table is None:
            from ..table.column import FORMATTER as formatter
        else:
            formatter = self.parent_table.formatter

        _pformat_col_iter = formatter._pformat_col_iter
        for str_val in _pformat_col_iter(col, -1, False, False, {}):
            yield str_val

    def adjust_indices(self, index, value, col_len):
        '''
        Adjust info indices after column modification.

        Parameters
        ----------
        index : slice, int, list, or ndarray
            Element(s) of column to modify. This parameter can
            be a single row number, a list of row numbers, an
            ndarray of row numbers, a boolean ndarray (a mask),
            or a column slice.
        value : int, list, or ndarray
            New value(s) to insert
        col_len : int
            Length of the column
        '''
        if not self.indices:
            return

        if isinstance(index, slice):
            # run through each key in slice
            t = index.indices(col_len)
            keys = list(range(*t))
        elif isinstance(index, np.ndarray) and index.dtype.kind == 'b':
            # boolean mask
            keys = np.where(index)[0]
        else:  # single int
            keys = [index]

        value = np.atleast_1d(value)  # turn array(x) into array([x])
        if value.size == 1:
            # repeat single value
            value = list(value) * len(keys)

        for key, val in zip(keys, value):
            for col_index in self.indices:
                col_index.replace(key, self.name, val)

    def slice_indices(self, col_slice, item, col_len):
        '''
        Given a sliced object, modify its indices
        to correctly represent the slice.

        Parameters
        ----------
        col_slice : Column or mixin
            Sliced object
        item : slice, list, or ndarray
            Slice used to create col_slice
        col_len : int
            Length of original object
        '''
        from ..table.sorted_array import SortedArray
        if not getattr(self, '_copy_indices', True):
            # Necessary because MaskedArray will perform a shallow copy
            col_slice.info.indices = []
            return col_slice
        elif isinstance(item, slice):
            col_slice.info.indices = [x[item] for x in self.indices]
        elif self.indices:
            if isinstance(item, np.ndarray) and item.dtype.kind == 'b':
                # boolean mask
                item = np.where(item)[0]
            threshold = 0.6
            # Empirical testing suggests that recreating a BST/RBT index is
            # more effective than relabelling when less than ~60% of
            # the total number of rows are involved, and is in general
            # more effective for SortedArray.
            # NOTE(review): `threshold` above is unused; 0.6 is repeated
            # literally on the next line — candidate cleanup.
            small = len(item) <= 0.6 * col_len
            col_slice.info.indices = []
            for index in self.indices:
                if small or isinstance(index, SortedArray):
                    new_index = index.get_slice(col_slice, item)
                else:
                    new_index = deepcopy(index)
                    new_index.replace_rows(item)
                col_slice.info.indices.append(new_index)

        return col_slice

    @staticmethod
    def merge_cols_attributes(cols, metadata_conflicts, name, attrs):
        """
        Utility method to merge and validate the attributes ``attrs`` for
        the input table columns ``cols``.

        Note that ``dtype`` and ``shape`` attributes are handled specially.
        These should not be passed in ``attrs`` but will always be in the
        returned dict of merged attributes.

        Parameters
        ----------
        cols : list
            List of input Table column objects
        metadata_conflicts : str ('warn'|'error'|'silent')
            How to handle metadata conflicts
        name : str
            Output column name
        attrs : list
            List of attribute names to be merged

        Returns
        -------
        attrs : dict of merged attributes
        """
        from ..table.np_utils import TableMergeError

        def warn_str_func(key, left, right):
            out = ("In merged column '{}' the '{}' attribute does not match "
                   "({} != {}).  Using {} for merged output"
                   .format(name, key, left, right, right))
            return out

        def getattrs(col):
            # only attrs that are actually set on the column's info
            return {attr: getattr(col.info, attr) for attr in attrs
                    if getattr(col.info, attr, None) is not None}

        out = getattrs(cols[0])
        for col in cols[1:]:
            out = metadata.merge(out, getattrs(col),
                                 metadata_conflicts=metadata_conflicts,
                                 warn_str_func=warn_str_func)

        # Output dtype is the superset of all dtypes in in_cols
        out['dtype'] = metadata.common_dtype(cols)

        # Make sure all input shapes are the same
        uniq_shapes = set(col.shape[1:] for col in cols)
        if len(uniq_shapes) != 1:
            raise TableMergeError('columns have different shapes')
        out['shape'] = uniq_shapes.pop()

        return out


class MixinInfo(BaseColumnInfo):

    def __setattr__(self, attr, value):
        # For mixin columns that live within a table, rename the column in
        # the table when setting the name attribute.  This mirrors the same
        # functionality in the BaseColumn class.
        if attr == 'name' and self.parent_table is not None:
            from ..table.np_utils import fix_column_name
            new_name = fix_column_name(value)  # Ensure col name is numpy compatible
            self.parent_table.columns._rename_column(self.name, new_name)

        super(MixinInfo, self).__setattr__(attr, value)


class ParentDtypeInfo(MixinInfo):
    """Mixin that gets info.dtype from parent"""

    attrs_from_parent = set(['dtype'])  # dtype and unit taken from parent
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

import datetime
from typing import Any, Dict, List, Optional, Union

from azure.core.exceptions import HttpResponseError
import msrest.serialization

# NOTE(review): star-import is the AutoRest generated-code convention — it
# re-exports the client enums (e.g. CreatedByType) for the type hints below.
from ._template_specs_client_enums import *


class AzureResourceBase(msrest.serialization.Model):
    """Common properties for all Azure resources.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: String Id used to locate any resource on Azure.
    :vartype id: str
    :ivar name: Name of this resource.
    :vartype name: str
    :ivar type: Type of this resource.
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.SystemData
    """

    # All fields are read-only: populated by the server, never sent by the client.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(AzureResourceBase, self).__init__(**kwargs)
        self.id = None
        self.name = None
        self.type = None
        self.system_data = None


class ErrorAdditionalInfo(msrest.serialization.Model):
    """The resource management error additional info.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar type: The additional info type.
    :vartype type: str
    :ivar info: The additional info.
    :vartype info: any
    """

    _validation = {
        'type': {'readonly': True},
        'info': {'readonly': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'info': {'key': 'info', 'type': 'object'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(ErrorAdditionalInfo, self).__init__(**kwargs)
        self.type = None
        self.info = None


class ErrorResponse(msrest.serialization.Model):
    """Common error response for all Azure Resource Manager APIs to return error details for failed
    operations. (This also follows the OData error response format.).

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: The error code.
    :vartype code: str
    :ivar message: The error message.
    :vartype message: str
    :ivar target: The error target.
    :vartype target: str
    :ivar details: The error details.
    :vartype details:
     list[~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.ErrorResponse]
    :ivar additional_info: The error additional info.
    :vartype additional_info:
     list[~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.ErrorAdditionalInfo]
    """

    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
        'target': {'readonly': True},
        'details': {'readonly': True},
        'additional_info': {'readonly': True},
    }

    # 'details' is recursive: an error may carry nested ErrorResponse entries.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorResponse]'},
        'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(ErrorResponse, self).__init__(**kwargs)
        self.code = None
        self.message = None
        self.target = None
        self.details = None
        self.additional_info = None


class LinkedTemplateArtifact(msrest.serialization.Model):
    """Represents a Template Spec artifact containing an embedded Azure Resource Manager template
    for use as a linked template.

    All required parameters must be populated in order to send to Azure.

    :ivar path: Required. A filesystem safe relative path of the artifact.
    :vartype path: str
    :ivar template: Required. The Azure Resource Manager template.
    :vartype template: any
    """

    _validation = {
        'path': {'required': True},
        'template': {'required': True},
    }

    _attribute_map = {
        'path': {'key': 'path', 'type': 'str'},
        'template': {'key': 'template', 'type': 'object'},
    }

    def __init__(
        self,
        *,
        path: str,
        template: Any,
        **kwargs
    ):
        """
        :keyword path: Required. A filesystem safe relative path of the artifact.
        :paramtype path: str
        :keyword template: Required. The Azure Resource Manager template.
        :paramtype template: any
        """
        super(LinkedTemplateArtifact, self).__init__(**kwargs)
        self.path = path
        self.template = template


class SystemData(msrest.serialization.Model):
    """Metadata pertaining to creation and last modification of the resource.

    :ivar created_by: The identity that created the resource.
    :vartype created_by: str
    :ivar created_by_type: The type of identity that created the resource. Possible values
     include: "User", "Application", "ManagedIdentity", "Key".
    :vartype created_by_type: str or
     ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.CreatedByType
    :ivar created_at: The timestamp of resource creation (UTC).
    :vartype created_at: ~datetime.datetime
    :ivar last_modified_by: The identity that last modified the resource.
    :vartype last_modified_by: str
    :ivar last_modified_by_type: The type of identity that last modified the resource. Possible
     values include: "User", "Application", "ManagedIdentity", "Key".
    :vartype last_modified_by_type: str or
     ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.CreatedByType
    :ivar last_modified_at: The timestamp of resource last modification (UTC).
    :vartype last_modified_at: ~datetime.datetime
    """

    # Timestamps travel over the wire as ISO-8601 strings.
    _attribute_map = {
        'created_by': {'key': 'createdBy', 'type': 'str'},
        'created_by_type': {'key': 'createdByType', 'type': 'str'},
        'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
        'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
        'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
        'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        *,
        created_by: Optional[str] = None,
        created_by_type: Optional[Union[str, "CreatedByType"]] = None,
        created_at: Optional[datetime.datetime] = None,
        last_modified_by: Optional[str] = None,
        last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
        last_modified_at: Optional[datetime.datetime] = None,
        **kwargs
    ):
        """
        :keyword created_by: The identity that created the resource.
        :paramtype created_by: str
        :keyword created_by_type: The type of identity that created the resource. Possible values
         include: "User", "Application", "ManagedIdentity", "Key".
        :paramtype created_by_type: str or
         ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.CreatedByType
        :keyword created_at: The timestamp of resource creation (UTC).
        :paramtype created_at: ~datetime.datetime
        :keyword last_modified_by: The identity that last modified the resource.
        :paramtype last_modified_by: str
        :keyword last_modified_by_type: The type of identity that last modified the resource.
         Possible values include: "User", "Application", "ManagedIdentity", "Key".
        :paramtype last_modified_by_type: str or
         ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.CreatedByType
        :keyword last_modified_at: The timestamp of resource last modification (UTC).
        :paramtype last_modified_at: ~datetime.datetime
        """
        super(SystemData, self).__init__(**kwargs)
        self.created_by = created_by
        self.created_by_type = created_by_type
        self.created_at = created_at
        self.last_modified_by = last_modified_by
        self.last_modified_by_type = last_modified_by_type
        self.last_modified_at = last_modified_at


class TemplateSpec(AzureResourceBase):
    """Template Spec object.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: String Id used to locate any resource on Azure.
    :vartype id: str
    :ivar name: Name of this resource.
    :vartype name: str
    :ivar type: Type of this resource.
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.SystemData
    :ivar location: Required. The location of the Template Spec. It cannot be changed after
     Template Spec creation. It must be one of the supported Azure locations.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar description: Template Spec description.
    :vartype description: str
    :ivar display_name: Template Spec display name.
    :vartype display_name: str
    :ivar metadata: The Template Spec metadata. Metadata is an open-ended object and is typically
     a collection of key-value pairs.
    :vartype metadata: any
    :ivar versions: High-level information about the versions within this Template Spec. The keys
     are the version names. Only populated if the $expand query parameter is set to 'versions'.
    :vartype versions: dict[str,
     ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.TemplateSpecVersionInfo]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'location': {'required': True},
        'description': {'max_length': 4096, 'min_length': 0},
        'display_name': {'max_length': 64, 'min_length': 0},
        'versions': {'readonly': True},
    }

    # 'properties.*' keys are flattened out of the nested ARM 'properties' envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'display_name': {'key': 'properties.displayName', 'type': 'str'},
        'metadata': {'key': 'properties.metadata', 'type': 'object'},
        'versions': {'key': 'properties.versions', 'type': '{TemplateSpecVersionInfo}'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        description: Optional[str] = None,
        display_name: Optional[str] = None,
        metadata: Optional[Any] = None,
        **kwargs
    ):
        """
        :keyword location: Required. The location of the Template Spec. It cannot be changed after
         Template Spec creation. It must be one of the supported Azure locations.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword description: Template Spec description.
        :paramtype description: str
        :keyword display_name: Template Spec display name.
        :paramtype display_name: str
        :keyword metadata: The Template Spec metadata. Metadata is an open-ended object and is
         typically a collection of key-value pairs.
        :paramtype metadata: any
        """
        super(TemplateSpec, self).__init__(**kwargs)
        self.location = location
        self.tags = tags
        self.description = description
        self.display_name = display_name
        self.metadata = metadata
        # Server-populated (readonly); never sent in a request.
        self.versions = None


class TemplateSpecsError(msrest.serialization.Model):
    """Template Specs error response.

    :ivar error: Common error response for all Azure Resource Manager APIs to return error
     details for failed operations. (This also follows the OData error response format.).
    :vartype error: ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.ErrorResponse
    """

    _attribute_map = {
        'error': {'key': 'error', 'type': 'ErrorResponse'},
    }

    def __init__(
        self,
        *,
        error: Optional["ErrorResponse"] = None,
        **kwargs
    ):
        """
        :keyword error: Common error response for all Azure Resource Manager APIs to return error
         details for failed operations. (This also follows the OData error response format.).
        :paramtype error:
         ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.ErrorResponse
        """
        super(TemplateSpecsError, self).__init__(**kwargs)
        self.error = error


class TemplateSpecsListResult(msrest.serialization.Model):
    """List of Template Specs.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: An array of Template Specs.
    :vartype value:
     list[~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.TemplateSpec]
    :ivar next_link: The URL to use for getting the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[TemplateSpec]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["TemplateSpec"]] = None,
        **kwargs
    ):
        """
        :keyword value: An array of Template Specs.
        :paramtype value:
         list[~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.TemplateSpec]
        """
        super(TemplateSpecsListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = None


class TemplateSpecUpdateModel(AzureResourceBase):
    """Template Spec properties to be updated (only tags are currently supported).

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: String Id used to locate any resource on Azure.
    :vartype id: str
    :ivar name: Name of this resource.
    :vartype name: str
    :ivar type: Type of this resource.
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.SystemData
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        """
        super(TemplateSpecUpdateModel, self).__init__(**kwargs)
        self.tags = tags


class TemplateSpecVersion(AzureResourceBase):
    """Template Spec Version object.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: String Id used to locate any resource on Azure.
    :vartype id: str
    :ivar name: Name of this resource.
    :vartype name: str
    :ivar type: Type of this resource.
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.SystemData
    :ivar location: Required. The location of the Template Spec Version. It must match the
     location of the parent Template Spec.
    :vartype location: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :ivar description: Template Spec version description.
    :vartype description: str
    :ivar linked_templates: An array of linked template artifacts.
    :vartype linked_templates:
     list[~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.LinkedTemplateArtifact]
    :ivar metadata: The version metadata. Metadata is an open-ended object and is typically a
     collection of key-value pairs.
    :vartype metadata: any
    :ivar main_template: The main Azure Resource Manager template content.
    :vartype main_template: any
    :ivar ui_form_definition: The Azure Resource Manager template UI definition content.
    :vartype ui_form_definition: any
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'location': {'required': True},
        'description': {'max_length': 4096, 'min_length': 0},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'linked_templates': {'key': 'properties.linkedTemplates', 'type': '[LinkedTemplateArtifact]'},
        'metadata': {'key': 'properties.metadata', 'type': 'object'},
        'main_template': {'key': 'properties.mainTemplate', 'type': 'object'},
        'ui_form_definition': {'key': 'properties.uiFormDefinition', 'type': 'object'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        description: Optional[str] = None,
        linked_templates: Optional[List["LinkedTemplateArtifact"]] = None,
        metadata: Optional[Any] = None,
        main_template: Optional[Any] = None,
        ui_form_definition: Optional[Any] = None,
        **kwargs
    ):
        """
        :keyword location: Required. The location of the Template Spec Version. It must match the
         location of the parent Template Spec.
        :paramtype location: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        :keyword description: Template Spec version description.
        :paramtype description: str
        :keyword linked_templates: An array of linked template artifacts.
        :paramtype linked_templates:
         list[~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.LinkedTemplateArtifact]
        :keyword metadata: The version metadata. Metadata is an open-ended object and is typically
         a collection of key-value pairs.
        :paramtype metadata: any
        :keyword main_template: The main Azure Resource Manager template content.
        :paramtype main_template: any
        :keyword ui_form_definition: The Azure Resource Manager template UI definition content.
        :paramtype ui_form_definition: any
        """
        super(TemplateSpecVersion, self).__init__(**kwargs)
        self.location = location
        self.tags = tags
        self.description = description
        self.linked_templates = linked_templates
        self.metadata = metadata
        self.main_template = main_template
        self.ui_form_definition = ui_form_definition


class TemplateSpecVersionInfo(msrest.serialization.Model):
    """High-level information about a Template Spec version.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar description: Template Spec version description.
    :vartype description: str
    :ivar time_created: The timestamp of when the version was created.
    :vartype time_created: ~datetime.datetime
    :ivar time_modified: The timestamp of when the version was last modified.
    :vartype time_modified: ~datetime.datetime
    """

    _validation = {
        'description': {'readonly': True},
        'time_created': {'readonly': True},
        'time_modified': {'readonly': True},
    }

    _attribute_map = {
        'description': {'key': 'description', 'type': 'str'},
        'time_created': {'key': 'timeCreated', 'type': 'iso-8601'},
        'time_modified': {'key': 'timeModified', 'type': 'iso-8601'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(TemplateSpecVersionInfo, self).__init__(**kwargs)
        self.description = None
        self.time_created = None
        self.time_modified = None


class TemplateSpecVersionsListResult(msrest.serialization.Model):
    """List of Template Specs versions.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: An array of Template Spec versions.
    :vartype value:
     list[~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.TemplateSpecVersion]
    :ivar next_link: The URL to use for getting the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[TemplateSpecVersion]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["TemplateSpecVersion"]] = None,
        **kwargs
    ):
        """
        :keyword value: An array of Template Spec versions.
        :paramtype value:
         list[~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.TemplateSpecVersion]
        """
        super(TemplateSpecVersionsListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = None


class TemplateSpecVersionUpdateModel(AzureResourceBase):
    """Template Spec Version properties to be updated (only tags are currently supported).

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: String Id used to locate any resource on Azure.
    :vartype id: str
    :ivar name: Name of this resource.
    :vartype name: str
    :ivar type: Type of this resource.
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.SystemData
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        """
        super(TemplateSpecVersionUpdateModel, self).__init__(**kwargs)
        self.tags = tags
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Github API util tests."""

import contextlib
import os
import textwrap
from unittest import mock

from etils import epath
import pytest

from tensorflow_datasets.core.github_api import github_path

_SKIP_NON_HERMETIC = False

# Non hermetic tests are explicitly marked and skipped if `_SKIP_NON_HERMETIC`
# is True.
non_hermetic_test = pytest.mark.skipif(
    _SKIP_NON_HERMETIC,
    reason='Non-hermetic test skipped.',
)

# Keep a reference to the real query method so `enable_api_call` can restore it
# locally while the global conftest mock forbids API calls.
_original_query_github = github_path.GithubApi.query

_AUTHOR_EXPECTED_CONTENT = textwrap.dedent("""\
    # This is the list of TensorFlow Datasets authors for copyright purposes.
    #
    # This does not necessarily list everyone who has contributed code, since in
    # some cases, their employer may be the copyright holder. To see the full list
    # of contributors, see the revision history in source control.
    Google Inc.
    """)


# Note: assert_no_api_call is globally applied on all tests (in conftest.py)


@contextlib.contextmanager
def enable_api_call():
  """Contextmanager which locally re-enable API calls."""
  with mock.patch.object(github_path.GithubApi, 'query', _original_query_github):
    yield


def test_parse_github_path():
  # Full path: repo + branch + file path.
  url = 'github://tensorflow/datasets/tree/master/docs/README.md'
  repo, branch, path = github_path._parse_github_path(url)
  assert repo == 'tensorflow/datasets'
  assert branch == 'master'
  assert path == 'docs/README.md'

  # Repo root: the file path component is empty.
  url = 'github://tensorflow/datasets/tree/master'
  repo, branch, path = github_path._parse_github_path(url)
  assert repo == 'tensorflow/datasets'
  assert branch == 'master'
  assert path == ''  # pylint: disable=g-explicit-bool-comparison


def test_github_path_registered_as_path():
  # `github://` URIs resolve to GithubPath through the epath registry.
  uri = 'github://tensorflow/datasets/tree/master/docs/README.md'
  path = epath.Path(uri)
  assert isinstance(path, github_path.GithubPath)
  assert os.fspath(path) == uri


def test_invalid_github_path():
  # Path are lazily validated, so require explicit `_metadata` call.
  with pytest.raises(ValueError, match='Invalid github path'):
    _ = github_path.GithubPath()._metadata

  with pytest.raises(ValueError, match='Invalid github path'):
    _ = github_path.GithubPath('')._metadata

  with pytest.raises(ValueError, match='Invalid github path'):
    _ = github_path.GithubPath('github://not/a/path')

  with pytest.raises(ValueError, match='Invalid github path'):
    _ = github_path.GithubPath('github://tensorflow/tree/master/docs/README.md')

  # `blob` isn't accepted for consistency between paths.
  with pytest.raises(ValueError, match='/blob/` isn\'t accepted.'):
    _ = github_path.GithubPath(
        'github://tensorflow/datasets/blob/master/docs/README.md')

  # Walking up past the repo root eventually yields an invalid path.
  p = github_path.GithubPath(
      'github://tensorflow/datasets/tree/master/docs/README.md')
  p = p.parent  # /docs
  _ = p._metadata
  p = p.parent  # /
  _ = p._metadata
  p = p.parent
  with pytest.raises(ValueError, match='Invalid github path'):
    _ = p._metadata


def test_github_path_purepath():
  """Tests that pathlib methods works as expected."""
  p = github_path.GithubPath('github://tensorflow/datasets/tree/master')
  sub_p = p / 'some_folder'
  assert isinstance(sub_p, github_path.GithubPath)
  assert str(p) == 'github://tensorflow/datasets/tree/master'
  assert str(sub_p) == 'github://tensorflow/datasets/tree/master/some_folder'
  assert os.fspath(p) == 'github://tensorflow/datasets/tree/master'
  assert p == github_path.GithubPath.from_repo('tensorflow/datasets')


def test_github_path_as_url():
  # A repo path maps to the raw.githubusercontent.com download URL.
  p = github_path.GithubPath.from_repo('tensorflow/datasets', 'v3.1.0')
  p /= 'README.md'
  expected = 'https://raw.githubusercontent.com/tensorflow/datasets/v3.1.0/README.md'
  assert p.as_raw_url() == expected


@non_hermetic_test
def test_github_api_listdir():
  """Test query github API."""
  # PurePath ops do not trigger API calls
  p = github_path.GithubPath.from_repo('tensorflow/datasets', 'v3.1.0')
  p = p / 'tensorflow_datasets' / 'testing'

  with enable_api_call():
    sub_dirs = sorted(p.iterdir())

  # `listdir` call cache the filetype of all childs
  all_dir_names = [d.name for d in sub_dirs if d.is_dir()]
  all_file_names = [d.name for d in sub_dirs if d.is_file()]
  all_names = [d.name for d in sub_dirs]

  # A file cannot be iterated.
  with pytest.raises(NotADirectoryError):
    list((p / '__init__.py').iterdir())

  assert all_names == [
      '__init__.py',
      'dataset_builder_testing.py',
      'dataset_builder_testing_test.py',
      'fake_data_generation',
      'fake_data_utils.py',
      'generate_archives.sh',
      'metadata',
      'mocking.py',
      'mocking_test.py',
      'test_case.py',
      'test_data',
      'test_utils.py',
      'test_utils_test.py',
  ]
  assert all_dir_names == [
      'fake_data_generation',
      'metadata',
      'test_data',
  ]
  assert all_file_names == [
      '__init__.py',
      'dataset_builder_testing.py',
      'dataset_builder_testing_test.py',
      'fake_data_utils.py',
      'generate_archives.sh',
      'mocking.py',
      'mocking_test.py',
      'test_case.py',
      'test_utils.py',
      'test_utils_test.py',
  ]


@non_hermetic_test
def test_github_api_exists():
  """Test query github API."""
  p = github_path.GithubPath.from_repo('tensorflow/datasets', 'v3.1.0')
  with enable_api_call():
    assert p.exists()
    assert not (p / 'unknown_dir').exists()

  readme = p / 'README.md'
  core = p / 'tensorflow_datasets' / 'core'
  with enable_api_call():
    assert readme.is_file()
    assert core.is_dir()
  # Data should have been cached (no API calls required)
  assert not readme.is_dir()
  assert not core.is_file()
  assert readme.exists()
  assert core.exists()

  # Recreating a new Path reuse the cache
  readme_recreated = core.parent.parent / 'README.md'
  assert readme_recreated.is_file()
  assert readme_recreated._metadata == readme._metadata


@non_hermetic_test
def test_github_api_read_bytes_text():
  """Test query github API file content."""
  p = github_path.GithubPath.from_repo('tensorflow/datasets', 'v3.1.0')

  # Note: This is not wrapped inside `enable_api_call` contextmanager as
  # users need to download files without setting up an API token.
  content = (p / 'AUTHORS').read_bytes()
  assert isinstance(content, bytes)
  assert content == _AUTHOR_EXPECTED_CONTENT.encode()

  content = (p / 'AUTHORS').read_text()
  assert isinstance(content, str)
  assert content == _AUTHOR_EXPECTED_CONTENT

  # Cannot read the content of a directory.
  with pytest.raises(FileNotFoundError, match='Request failed'):
    (p / 'tensorflow_datasets' / 'core').read_bytes()


@non_hermetic_test
def test_github_api_copy(tmp_path):
  p = github_path.GithubPath.from_repo('tensorflow/datasets', 'v3.1.0')
  src = p / 'AUTHORS'
  dst = tmp_path / 'AUTHORS'
  target = src.copy(dst)
  assert target == dst
  assert dst.read_text() == _AUTHOR_EXPECTED_CONTENT

  # Copying onto an existing destination requires overwrite=True.
  with pytest.raises(FileExistsError, match='Destination .* exists'):
    src.copy(dst)
  src.copy(dst, overwrite=True)


def test_assert_no_api_call():
  # The global conftest mock turns any real API call into an AssertionError.
  with pytest.raises(AssertionError, match='Forbidden API call'):
    github_path.GithubPath.from_repo('tensorflow/datasets', 'v1.0.0').exists()


def test_get_tree():
  # Fake GitHub "git/trees" API payload: two files nested in two folders.
  tree = {
      'tree': [
          {
              'path': 'code1.py',
              'type': 'blob',
          },
          {
              'path': 'myfolder',
              'type': 'tree',
          },
          {
              'path': 'myfolder/code2.py',
              'type': 'blob',
          },
          {
              'path': 'myfolder/mysubfolder',
              'type': 'tree',
          },
          {
              'path': 'myfolder/mysubfolder/code3.py',
              'type': 'blob',
          },
      ]
  }
  with mock.patch.object(github_path.GithubApi, 'query', return_value=tree):
    root = github_path.GithubPath.from_repo('tensorflow/datasets', 'v9.9.9')

    def gh_path(file: str) -> github_path.GithubPath:
      # Build the expected GithubPath for a file in the fake repo.
      return github_path.GithubPath(
          f'github://tensorflow/datasets/tree/v9.9.9/{file}')

    def assert_is_file(file):
      # A blob entry is a file, not a directory, and exists.
      assert file.is_file()
      assert not file.is_dir()
      assert file.exists()

    def assert_is_folder(folder, files):
      # A tree entry lists exactly `files`, is a directory, and exists.
      assert set(folder.iterdir()) == files
      assert folder.is_dir()
      assert not folder.is_file()
      assert folder.exists()

    myfolder = gh_path('myfolder')
    mysubfolder = gh_path('myfolder/mysubfolder')

    code1 = gh_path('code1.py')
    code2 = gh_path('myfolder/code2.py')
    code3 = gh_path('myfolder/mysubfolder/code3.py')

    assert_is_folder(root, {code1, myfolder})
    assert_is_folder(myfolder, {code2, mysubfolder})
    assert_is_folder(mysubfolder, {code3})
    assert_is_file(code1)
    assert_is_file(code2)
    assert_is_file(code3)
from __future__ import absolute_import, unicode_literals from itertools import groupby from django import forms from django.conf import settings from django.contrib.auth import get_user_model from django.contrib.auth.models import Group, Permission from django.db import transaction from django.template.loader import render_to_string from django.utils.translation import ugettext_lazy as _ from wagtail.wagtailadmin.widgets import AdminPageChooser from wagtail.wagtailcore import hooks from wagtail.wagtailcore.models import ( PAGE_PERMISSION_TYPE_CHOICES, PAGE_PERMISSION_TYPES, GroupPagePermission, Page, UserPagePermissionsProxy) from wagtail.wagtailusers.models import UserProfile User = get_user_model() # The standard fields each user model is expected to have, as a minimum. standard_fields = set(['email', 'first_name', 'last_name', 'is_superuser', 'groups']) # Custom fields if hasattr(settings, 'WAGTAIL_USER_CUSTOM_FIELDS'): custom_fields = set(settings.WAGTAIL_USER_CUSTOM_FIELDS) else: custom_fields = set() class UsernameForm(forms.ModelForm): """ Intelligently sets up the username field if it is in fact a username. If the User model has been swapped out, and the username field is an email or something else, don't touch it. """ def __init__(self, *args, **kwargs): super(UsernameForm, self).__init__(*args, **kwargs) if User.USERNAME_FIELD == 'username': field = self.fields['username'] field.regex = r"^[\w.@+-]+$" field.help_text = _("Required. 30 characters or fewer. 
Letters, " "digits and @/./+/-/_ only.") field.error_messages = field.error_messages.copy() field.error_messages.update({ 'invalid': _("This value may contain only letters, numbers " "and @/./+/-/_ characters.")}) @property def username_field(self): return self[User.USERNAME_FIELD] def separate_username_field(self): return User.USERNAME_FIELD not in standard_fields class UserCreationForm(UsernameForm): required_css_class = "required" error_messages = { 'duplicate_username': _("A user with that username already exists."), 'password_mismatch': _("The two password fields didn't match."), } is_superuser = forms.BooleanField( label=_("Administrator"), required=False, help_text=_("Administrators have full access to manage any object or setting.") ) password1 = forms.CharField( label=_("Password"), required=False, widget=forms.PasswordInput, help_text=_("Leave blank if not changing.")) password2 = forms.CharField( label=_("Password confirmation"), required=False, widget=forms.PasswordInput, help_text=_("Enter the same password as above, for verification.")) email = forms.EmailField(required=True, label=_("Email")) first_name = forms.CharField(required=True, label=_("First Name")) last_name = forms.CharField(required=True, label=_("Last Name")) class Meta: model = User fields = set([User.USERNAME_FIELD]) | standard_fields | custom_fields widgets = { 'groups': forms.CheckboxSelectMultiple } def clean_username(self): username_field = User.USERNAME_FIELD username = self.cleaned_data[username_field] try: User._default_manager.get(**{username_field: username}) except User.DoesNotExist: return username raise forms.ValidationError( self.error_messages['duplicate_username'], code='duplicate_username', ) def clean_password2(self): password1 = self.cleaned_data.get("password1") password2 = self.cleaned_data.get("password2") if password1 and password2 and password1 != password2: raise forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch', ) return 
password2 def save(self, commit=True): user = super(UserCreationForm, self).save(commit=False) user.set_password(self.cleaned_data["password1"]) # users can access django-admin iff they are a superuser user.is_staff = user.is_superuser if commit: user.save() self.save_m2m() return user # Largely the same as django.contrib.auth.forms.UserCreationForm, but with enough subtle changes # (to make password non-required) that it isn't worth inheriting... class UserEditForm(UsernameForm): required_css_class = "required" error_messages = { 'duplicate_username': _("A user with that username already exists."), 'password_mismatch': _("The two password fields didn't match."), } email = forms.EmailField(required=True, label=_("Email")) first_name = forms.CharField(required=True, label=_("First Name")) last_name = forms.CharField(required=True, label=_("Last Name")) password1 = forms.CharField( label=_("Password"), required=False, widget=forms.PasswordInput, help_text=_("Leave blank if not changing.")) password2 = forms.CharField( label=_("Password confirmation"), required=False, widget=forms.PasswordInput, help_text=_("Enter the same password as above, for verification.")) is_superuser = forms.BooleanField( label=_("Administrator"), required=False, help_text=_("Administrators have full access to manage any object or setting.") ) class Meta: model = User fields = set([User.USERNAME_FIELD, "is_active"]) | standard_fields | custom_fields widgets = { 'groups': forms.CheckboxSelectMultiple } def clean_username(self): # Since User.username is unique, this check is redundant, # but it sets a nicer error message than the ORM. See #13147. 
username = self.cleaned_data["username"] username_field = User.USERNAME_FIELD try: User._default_manager.exclude(pk=self.instance.pk).get(**{ username_field: username}) except User.DoesNotExist: return username raise forms.ValidationError(self.error_messages['duplicate_username']) def clean_password2(self): password1 = self.cleaned_data.get("password1") password2 = self.cleaned_data.get("password2") if password1 != password2: raise forms.ValidationError( self.error_messages['password_mismatch']) return password2 def save(self, commit=True): user = super(UserEditForm, self).save(commit=False) # users can access django-admin iff they are a superuser user.is_staff = user.is_superuser if self.cleaned_data["password1"]: user.set_password(self.cleaned_data["password1"]) if commit: user.save() self.save_m2m() return user class GroupForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(GroupForm, self).__init__(*args, **kwargs) self.registered_permissions = Permission.objects.none() for fn in hooks.get_hooks('register_permissions'): self.registered_permissions = self.registered_permissions | fn() self.fields['permissions'].queryset = self.registered_permissions required_css_class = "required" error_messages = { 'duplicate_name': _("A group with that name already exists."), } is_superuser = forms.BooleanField( label=_("Administrator"), required=False, help_text=_("Administrators have full access to manage any object or setting.") ) class Meta: model = Group fields = ("name", "permissions", ) widgets = { 'permissions': forms.CheckboxSelectMultiple(), } def clean_name(self): # Since Group.name is unique, this check is redundant, # but it sets a nicer error message than the ORM. See #13147. 
name = self.cleaned_data["name"] try: Group._default_manager.exclude(pk=self.instance.pk).get(name=name) except Group.DoesNotExist: return name raise forms.ValidationError(self.error_messages['duplicate_name']) def save(self): # We go back to the object to read (in order to reapply) the # permissions which were set on this group, but which are not # accessible in the wagtail admin interface, as otherwise these would # be clobbered by this form. try: untouchable_permissions = self.instance.permissions.exclude(pk__in=self.registered_permissions) bool(untouchable_permissions) # force this to be evaluated, as it's about to change except ValueError: # this form is not bound; we're probably creating a new group untouchable_permissions = [] group = super(GroupForm, self).save() group.permissions.add(*untouchable_permissions) return group class PagePermissionsForm(forms.Form): """ Note 'Permissions' (plural). A single instance of this form defines the permissions that are assigned to an entity (i.e. group or user) for a specific page. 
""" page = forms.ModelChoiceField( queryset=Page.objects.all(), widget=AdminPageChooser(show_edit_link=False, can_choose_root=True) ) permission_types = forms.MultipleChoiceField( choices=PAGE_PERMISSION_TYPE_CHOICES, required=False, widget=forms.CheckboxSelectMultiple ) class BaseGroupPagePermissionFormSet(forms.BaseFormSet): permission_types = PAGE_PERMISSION_TYPES # defined here for easy access from templates def __init__(self, data=None, files=None, instance=None, prefix='page_permissions'): if instance is None: instance = Group() self.instance = instance initial_data = [] for page, page_permissions in groupby( instance.page_permissions.order_by('page'), lambda pp: pp.page ): initial_data.append({ 'page': page, 'permission_types': [pp.permission_type for pp in page_permissions] }) super(BaseGroupPagePermissionFormSet, self).__init__( data, files, initial=initial_data, prefix=prefix ) for form in self.forms: form.fields['DELETE'].widget = forms.HiddenInput() @property def empty_form(self): empty_form = super(BaseGroupPagePermissionFormSet, self).empty_form empty_form.fields['DELETE'].widget = forms.HiddenInput() return empty_form def clean(self): """Checks that no two forms refer to the same page object""" if any(self.errors): # Don't bother validating the formset unless each form is valid on its own return pages = [ form.cleaned_data['page'] for form in self.forms # need to check for presence of 'page' in cleaned_data, # because a completely blank form passes validation if form not in self.deleted_forms and 'page' in form.cleaned_data ] if len(set(pages)) != len(pages): # pages list contains duplicates raise forms.ValidationError(_("You cannot have multiple permission records for the same page.")) @transaction.atomic def save(self): if self.instance.pk is None: raise Exception( "Cannot save a GroupPagePermissionFormSet for an unsaved group instance" ) # get a set of (page, permission_type) tuples for all ticked permissions forms_to_save = [ form for form in 
self.forms if form not in self.deleted_forms and 'page' in form.cleaned_data ] final_permission_records = set() for form in forms_to_save: for permission_type in form.cleaned_data['permission_types']: final_permission_records.add((form.cleaned_data['page'], permission_type)) # fetch the group's existing page permission records, and from that, build a list # of records to be created / deleted permission_ids_to_delete = [] permission_records_to_keep = set() for pp in self.instance.page_permissions.all(): if (pp.page, pp.permission_type) in final_permission_records: permission_records_to_keep.add((pp.page, pp.permission_type)) else: permission_ids_to_delete.append(pp.pk) self.instance.page_permissions.filter(pk__in=permission_ids_to_delete).delete() permissions_to_add = final_permission_records - permission_records_to_keep GroupPagePermission.objects.bulk_create([ GroupPagePermission( group=self.instance, page=page, permission_type=permission_type ) for (page, permission_type) in permissions_to_add ]) def as_admin_panel(self): return render_to_string('wagtailusers/groups/includes/page_permissions_formset.html', { 'formset': self }) GroupPagePermissionFormSet = forms.formset_factory( PagePermissionsForm, formset=BaseGroupPagePermissionFormSet, extra=0, can_delete=True ) class NotificationPreferencesForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(NotificationPreferencesForm, self).__init__(*args, **kwargs) user_perms = UserPagePermissionsProxy(self.instance.user) if not user_perms.can_publish_pages(): del self.fields['submitted_notifications'] if not user_perms.can_edit_pages(): del self.fields['approved_notifications'] del self.fields['rejected_notifications'] class Meta: model = UserProfile fields = ("submitted_notifications", "approved_notifications", "rejected_notifications")
# -*- coding: utf-8 -*- """IPython Test Suite Runner. This module provides a main entry point to a user script to test IPython itself from the command line. There are two ways of running this script: 1. With the syntax `iptest all`. This runs our entire test suite by calling this script (with different arguments) recursively. This causes modules and package to be tested in different processes, using nose or trial where appropriate. 2. With the regular nose syntax, like `iptest -vvs IPython`. In this form the script simply calls nose, but with special command line flags and plugins loaded. """ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import print_function import glob from io import BytesIO import os import os.path as path import sys from threading import Thread, Lock, Event import warnings import nose.plugins.builtin from nose.plugins.xunit import Xunit from nose import SkipTest from nose.core import TestProgram from nose.plugins import Plugin from nose.util import safe_str from IPython import version_info from IPython.utils.py3compat import bytes_to_str from IPython.utils.importstring import import_item from IPython.testing.plugin.ipdoctest import IPythonDoctest from IPython.external.decorators import KnownFailure, knownfailureif pjoin = path.join # Enable printing all warnings raise by IPython's modules warnings.filterwarnings('ignore', message='.*Matplotlib is building the font cache.*', category=UserWarning, module='.*') if sys.version_info > (3,0): warnings.filterwarnings('error', message='.*', category=ResourceWarning, module='.*') warnings.filterwarnings('error', message=".*{'config': True}.*", category=DeprecationWarning, module='IPy.*') warnings.filterwarnings('default', message='.*', category=Warning, module='IPy.*') if version_info < (6,): # nose.tools renames all things from `camelCase` to `snake_case` which raise an # warning with the runner they also import from standard import 
library. (as of Dec 2015) # Ignore, let's revisit that in a couple of years for IPython 6. warnings.filterwarnings('ignore', message='.*Please use assertEqual instead', category=Warning, module='IPython.*') # ------------------------------------------------------------------------------ # Monkeypatch Xunit to count known failures as skipped. # ------------------------------------------------------------------------------ def monkeypatch_xunit(): try: knownfailureif(True)(lambda: None)() except Exception as e: KnownFailureTest = type(e) def addError(self, test, err, capt=None): if issubclass(err[0], KnownFailureTest): err = (SkipTest,) + err[1:] return self.orig_addError(test, err, capt) Xunit.orig_addError = Xunit.addError Xunit.addError = addError #----------------------------------------------------------------------------- # Check which dependencies are installed and greater than minimum version. #----------------------------------------------------------------------------- def extract_version(mod): return mod.__version__ def test_for(item, min_version=None, callback=extract_version): """Test to see if item is importable, and optionally check against a minimum version. If min_version is given, the default behavior is to check against the `__version__` attribute of the item, but specifying `callback` allows you to extract the value you are interested in. e.g:: In [1]: import sys In [2]: from IPython.testing.iptest import test_for In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info) Out[3]: True """ try: check = import_item(item) except (ImportError, RuntimeError): # GTK reports Runtime error if it can't be initialized even if it's # importable. 
return False else: if min_version: if callback: # extra processing step to get version to compare check = callback(check) return check >= min_version else: return True # Global dict where we can store information on what we have and what we don't # have available at test run time have = {'matplotlib': test_for('matplotlib'), 'pygments': test_for('pygments'), 'sqlite3': test_for('sqlite3')} #----------------------------------------------------------------------------- # Test suite definitions #----------------------------------------------------------------------------- test_group_names = ['core', 'extensions', 'lib', 'terminal', 'testing', 'utils', ] class TestSection(object): def __init__(self, name, includes): self.name = name self.includes = includes self.excludes = [] self.dependencies = [] self.enabled = True def exclude(self, module): if not module.startswith('IPython'): module = self.includes[0] + "." + module self.excludes.append(module.replace('.', os.sep)) def requires(self, *packages): self.dependencies.extend(packages) @property def will_run(self): return self.enabled and all(have[p] for p in self.dependencies) # Name -> (include, exclude, dependencies_met) test_sections = {n:TestSection(n, ['IPython.%s' % n]) for n in test_group_names} # Exclusions and dependencies # --------------------------- # core: sec = test_sections['core'] if not have['sqlite3']: sec.exclude('tests.test_history') sec.exclude('history') if not have['matplotlib']: sec.exclude('pylabtools'), sec.exclude('tests.test_pylabtools') # lib: sec = test_sections['lib'] sec.exclude('kernel') if not have['pygments']: sec.exclude('tests.test_lexers') # We do this unconditionally, so that the test suite doesn't import # gtk, changing the default encoding and masking some unicode bugs. sec.exclude('inputhookgtk') # We also do this unconditionally, because wx can interfere with Unix signals. # There are currently no tests for it anyway. 
sec.exclude('inputhookwx')

# Testing inputhook will need a lot of thought, to figure out
# how to have tests that don't lock up with the gui event
# loops in the picture
sec.exclude('inputhook')

# testing:
sec = test_sections['testing']
# These have to be skipped on win32 because they use echo, rm, cd, etc.
# See ticket https://github.com/ipython/ipython/issues/87
if sys.platform == 'win32':
    sec.exclude('plugin.test_exampleip')
    sec.exclude('plugin.dtexample')

# don't run jupyter_console tests found via shim
test_sections['terminal'].exclude('console')

# extensions:
sec = test_sections['extensions']
# This is deprecated in favour of rpy2
sec.exclude('rmagic')
# autoreload does some strange stuff, so move it to its own test section
sec.exclude('autoreload')
sec.exclude('tests.test_autoreload')
test_sections['autoreload'] = TestSection('autoreload',
    ['IPython.extensions.autoreload', 'IPython.extensions.tests.test_autoreload'])
test_group_names.append('autoreload')


#-----------------------------------------------------------------------------
# Functions and classes
#-----------------------------------------------------------------------------

def check_exclusions_exist():
    """Warn about exclusion patterns that no longer match any file on disk."""
    from IPython.paths import get_ipython_package_dir
    from warnings import warn
    parent = os.path.dirname(get_ipython_package_dir())
    # Iterate the TestSection objects, not the dict's keys, and read the
    # attribute TestSection actually defines ('excludes'); the previous code
    # looped over section *names* and accessed a nonexistent 'exclusions'
    # attribute, raising AttributeError on first use.
    for sec in test_sections.values():
        for pattern in sec.excludes:
            fullpath = pjoin(parent, pattern)
            if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):
                warn("Excluding nonexistent file: %r" % pattern)


class ExclusionPlugin(Plugin):
    """A nose plugin to effect our exclusions of files and directories.
    """
    name = 'exclusions'
    score = 3000  # Should come before any other plugins

    def __init__(self, exclude_patterns=None):
        """
        Parameters
        ----------

        exclude_patterns : sequence of strings, optional
          Filenames containing these patterns (as raw strings, not as regular
          expressions) are excluded from the tests.
        """
        self.exclude_patterns = exclude_patterns or []
        super(ExclusionPlugin, self).__init__()

    def options(self, parser, env=os.environ):
        Plugin.options(self, parser, env)

    def configure(self, options, config):
        Plugin.configure(self, options, config)
        # Override nose trying to disable plugin.
        self.enabled = True

    def wantFile(self, filename):
        """Return whether the given filename should be scanned for tests.
        """
        if any(pat in filename for pat in self.exclude_patterns):
            return False
        return None

    def wantDirectory(self, directory):
        """Return whether the given directory should be scanned for tests.
        """
        if any(pat in directory for pat in self.exclude_patterns):
            return False
        return None


class StreamCapturer(Thread):
    """Background thread that drains a pipe into an in-memory buffer."""
    daemon = True  # Don't hang if main thread crashes
    started = False

    def __init__(self, echo=False):
        super(StreamCapturer, self).__init__()
        self.echo = echo
        self.streams = []
        self.buffer = BytesIO()
        self.readfd, self.writefd = os.pipe()
        self.buffer_lock = Lock()
        self.stop = Event()

    def run(self):
        self.started = True

        while not self.stop.is_set():
            chunk = os.read(self.readfd, 1024)

            with self.buffer_lock:
                self.buffer.write(chunk)
            if self.echo:
                sys.stdout.write(bytes_to_str(chunk))

        os.close(self.readfd)
        os.close(self.writefd)

    def reset_buffer(self):
        with self.buffer_lock:
            self.buffer.truncate(0)
            self.buffer.seek(0)

    def get_buffer(self):
        with self.buffer_lock:
            return self.buffer.getvalue()

    def ensure_started(self):
        if not self.started:
            self.start()

    def halt(self):
        """Safely stop the thread."""
        if not self.started:
            return

        self.stop.set()
        os.write(self.writefd, b'\0')  # Ensure we're not locked in a read()
        self.join()


class SubprocessStreamCapturePlugin(Plugin):
    """Nose plugin that captures subprocess output and appends it to failures."""
    name = 'subprocstreams'

    def __init__(self):
        Plugin.__init__(self)
        self.stream_capturer = StreamCapturer()
        self.destination = os.environ.get('IPTEST_SUBPROC_STREAMS', 'capture')
        # This is ugly, but distant parts of the test machinery need to be able
        # to redirect streams, so we make the object globally accessible.
        nose.iptest_stdstreams_fileno = self.get_write_fileno

    def get_write_fileno(self):
        if self.destination == 'capture':
            self.stream_capturer.ensure_started()
            return self.stream_capturer.writefd
        elif self.destination == 'discard':
            return os.open(os.devnull, os.O_WRONLY)
        else:
            return sys.__stdout__.fileno()

    def configure(self, options, config):
        Plugin.configure(self, options, config)
        # Override nose trying to disable plugin.
        if self.destination == 'capture':
            self.enabled = True

    def startTest(self, test):
        # Reset log capture
        self.stream_capturer.reset_buffer()

    def formatFailure(self, test, err):
        # Show output
        ec, ev, tb = err
        captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
        if captured.strip():
            ev = safe_str(ev)
            out = [ev, '>> begin captured subprocess output <<',
                   captured,
                   '>> end captured subprocess output <<']
            return ec, '\n'.join(out), tb

        return err

    formatError = formatFailure

    def finalize(self, result):
        self.stream_capturer.halt()


def run_iptest():
    """Run the IPython test suite using nose.

    This function is called when this script is **not** called with the form
    `iptest all`.  It simply calls nose with appropriate command line flags
    and accepts all of the standard nose arguments.
    """
    # Apply our monkeypatch to Xunit
    if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'):
        monkeypatch_xunit()

    arg1 = sys.argv[1]
    if arg1 in test_sections:
        section = test_sections[arg1]
        sys.argv[1:2] = section.includes
    elif arg1.startswith('IPython.') and arg1[8:] in test_sections:
        section = test_sections[arg1[8:]]
        sys.argv[1:2] = section.includes
    else:
        section = TestSection(arg1, includes=[arg1])

    argv = sys.argv + [ '--detailed-errors',  # extra info in tracebacks

                        # We add --exe because of setuptools' imbecility (it
                        # blindly does chmod +x on ALL files).  Nose does the
                        # right thing and it tries to avoid executables,
                        # setuptools unfortunately forces our hand here.  This
                        # has been discussed on the distutils list and the
                        # setuptools devs refuse to fix this problem!
                        '--exe',
                        ]
    if '-a' not in argv and '-A' not in argv:
        argv = argv + ['-a', '!crash']

    if nose.__version__ >= '0.11':
        # I don't fully understand why we need this one, but depending on what
        # directory the test suite is run from, if we don't give it, 0 tests
        # get run.  Specifically, if the test suite is run from the source dir
        # with an argument (like 'iptest.py IPython.core', 0 tests are run,
        # even if the same call done in this directory works fine).  It appears
        # that if the requested package is in the current dir, nose bails early
        # by default.  Since it's otherwise harmless, leave it in by default
        # for nose >= 0.11, though unfortunately nose 0.10 doesn't support it.
        argv.append('--traverse-namespace')

    plugins = [ ExclusionPlugin(section.excludes), KnownFailure(),
                SubprocessStreamCapturePlugin() ]

    # we still have some vestigial doctests in core
    if (section.name.startswith(('core', 'IPython.core'))):
        plugins.append(IPythonDoctest())
        argv.extend([
            '--with-ipdoctest',
            '--ipdoctest-tests',
            '--ipdoctest-extension=txt',
        ])

    # Use working directory set by parent process (see iptestcontroller)
    if 'IPTEST_WORKING_DIR' in os.environ:
        os.chdir(os.environ['IPTEST_WORKING_DIR'])

    # We need a global ipython running in this process, but the special
    # in-process group spawns its own IPython kernels, so for *that* group we
    # must avoid also opening the global one (otherwise there's a conflict of
    # singletons).  Ultimately the solution to this problem is to refactor our
    # assumptions about what needs to be a singleton and what doesn't (app
    # objects should, individual shells shouldn't).  But for now, this
    # workaround allows the test suite for the inprocess module to complete.
    if 'kernel.inprocess' not in section.name:
        from IPython.testing import globalipapp
        globalipapp.start_ipython()

    # Now nose can run
    TestProgram(argv=argv, addplugins=plugins)


if __name__ == '__main__':
    run_iptest()
from __future__ import unicode_literals

from django.core import mail
from django.utils import six

from reviewboard.reviews.models import Review
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (review_reply_item_mimetype,
                                                review_reply_list_mimetype)
from reviewboard.webapi.tests.mixins import (BasicTestsMetaclass,
                                             ReviewRequestChildItemMixin,
                                             ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_review import (ReviewItemMixin,
                                                    ReviewListMixin)
from reviewboard.webapi.tests.urls import (get_review_reply_item_url,
                                           get_review_reply_list_url)


class BaseResourceTestCase(BaseWebAPITestCase):
    # Shared helper base for the list/item test cases below.

    def _create_test_review(self, with_local_site=False):
        # Build a published review request owned by self.user, with one file
        # attachment, one published review, and one file-attachment comment,
        # then return the review. Used as a fixture for reply tests.
        review_request = self.create_review_request(
            submitter=self.user,
            with_local_site=with_local_site)
        file_attachment = self.create_file_attachment(review_request)
        review_request.publish(review_request.submitter)

        review = self.create_review(review_request, publish=True)
        self.create_file_attachment_comment(review, file_attachment)

        return review


# NOTE: BasicTestsMetaclass generates additional HTTP-method tests from the
# setup_basic_* / check_*_result hooks defined on each class below.
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(ReviewListMixin, ReviewRequestChildListMixin,
                        BaseResourceTestCase):
    """Testing the ReviewReplyResource list APIs."""
    fixtures = ['test_users']
    sample_api_url = 'review-requests/<id>/reviews/<id>/replies/'
    resource = resources.review_reply

    def setup_review_request_child_test(self, review_request):
        # Hook for ReviewRequestChildListMixin: returns (url, mimetype).
        review = self.create_review(review_request, publish=True)

        return (get_review_reply_list_url(review),
                review_reply_list_mimetype)

    def compare_item(self, item_rsp, reply):
        # Verify a serialized reply payload against its model instance.
        self.assertEqual(item_rsp['id'], reply.pk)
        self.assertEqual(item_rsp['body_top'], reply.body_top)
        self.assertEqual(item_rsp['body_bottom'], reply.body_bottom)

        if reply.body_top_rich_text:
            self.assertEqual(item_rsp['body_top_text_type'], 'markdown')
        else:
            self.assertEqual(item_rsp['body_top_text_type'], 'plain')

        if reply.body_bottom_rich_text:
            self.assertEqual(item_rsp['body_bottom_text_type'], 'markdown')
        else:
            self.assertEqual(item_rsp['body_bottom_text_type'], 'plain')

    #
    # HTTP GET tests
    #

    def setup_basic_get_test(self, user, with_local_site, local_site_name,
                             populate_items):
        # Hook for the metaclass-generated GET tests:
        # returns (url, expected mimetype, expected items).
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user,
            publish=True)
        review = self.create_review(review_request, publish=True)

        if populate_items:
            items = [self.create_reply(review, publish=True)]
        else:
            items = []

        return (get_review_reply_list_url(review, local_site_name),
                review_reply_list_mimetype,
                items)

    def test_get_with_counts_only(self):
        """Testing the GET
        review-requests/<id>/reviews/<id>/replies/?counts-only=1 API
        """
        review = self._create_test_review()
        self.create_reply(review, user=self.user, publish=True)

        rsp = self.api_get(
            '%s?counts-only=1' % get_review_reply_list_url(review),
            expected_mimetype=review_reply_list_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertEqual(rsp['count'], 1)

    #
    # HTTP POST tests
    #

    def setup_basic_post_test(self, user, with_local_site, local_site_name,
                              post_valid_data):
        # Hook for the metaclass-generated POST tests:
        # returns (url, mimetype, post data, check_post_result args).
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user,
            publish=True)
        review = self.create_review(review_request, publish=True)

        return (get_review_reply_list_url(review, local_site_name),
                review_reply_item_mimetype,
                {},
                [review])

    def check_post_result(self, user, rsp, review):
        reply = Review.objects.get(pk=rsp['reply']['id'])
        self.assertFalse(reply.body_top_rich_text)
        self.compare_item(rsp['reply'], reply)

    def test_post_with_body_top(self):
        """Testing the POST review-requests/<id>/reviews/<id>/replies/ API
        with body_top
        """
        body_top = 'My Body Top'

        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, publish=True)

        rsp = self.api_post(
            get_review_reply_list_url(review),
            {'body_top': body_top},
            expected_mimetype=review_reply_item_mimetype)

        self.assertEqual(rsp['stat'], 'ok')

        reply = Review.objects.get(pk=rsp['reply']['id'])
        self.assertEqual(reply.body_top, body_top)

    def test_post_with_body_bottom(self):
        """Testing the POST review-requests/<id>/reviews/<id>/replies/ API
        with body_bottom
        """
        body_bottom = 'My Body Bottom'

        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, publish=True)

        rsp = self.api_post(
            get_review_reply_list_url(review),
            {'body_bottom': body_bottom},
            expected_mimetype=review_reply_item_mimetype)

        self.assertEqual(rsp['stat'], 'ok')

        reply = Review.objects.get(pk=rsp['reply']['id'])
        self.assertEqual(reply.body_bottom, body_bottom)


@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(ReviewItemMixin, ReviewRequestChildItemMixin,
                        BaseResourceTestCase):
    """Testing the ReviewReplyResource item APIs."""
    fixtures = ['test_users']
    sample_api_url = 'review-requests/<id>/reviews/<id>/replies/<id>/'
    resource = resources.review_reply

    def setup_review_request_child_test(self, review_request):
        # Hook for ReviewRequestChildItemMixin: returns (url, mimetype).
        review = self.create_review(review_request, publish=True)
        reply = self.create_reply(review, publish=True)

        return (get_review_reply_item_url(review, reply.pk),
                review_reply_item_mimetype)

    def compare_item(self, item_rsp, reply):
        # Verify a serialized reply payload against its model instance.
        self.assertEqual(item_rsp['id'], reply.pk)
        self.assertEqual(item_rsp['body_top'], reply.body_top)
        self.assertEqual(item_rsp['body_bottom'], reply.body_bottom)

        if reply.body_top_rich_text:
            self.assertEqual(item_rsp['body_top_text_type'], 'markdown')
        else:
            self.assertEqual(item_rsp['body_top_text_type'], 'plain')

        if reply.body_bottom_rich_text:
            self.assertEqual(item_rsp['body_bottom_text_type'], 'markdown')
        else:
            self.assertEqual(item_rsp['body_bottom_text_type'], 'plain')

    #
    # HTTP DELETE tests
    #

    def setup_basic_delete_test(self, user, with_local_site, local_site_name):
        # Hook for the metaclass-generated DELETE tests:
        # returns (url, check_delete_result args).
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user,
            publish=True)
        review = self.create_review(review_request, user=user, publish=True)
        reply = self.create_reply(review, user=user)

        return (get_review_reply_item_url(review, reply.pk, local_site_name),
                [reply, review])

    def check_delete_result(self, user, reply, review):
        self.assertNotIn(reply, review.replies.all())

    #
    # HTTP GET tests
    #

    def setup_basic_get_test(self, user, with_local_site, local_site_name):
        # Hook for the metaclass-generated GET tests:
        # returns (url, mimetype, expected item).
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user,
            publish=True)
        review = self.create_review(review_request, user=user, publish=True)
        reply = self.create_reply(review, user=user)

        return (get_review_reply_item_url(review, reply.pk, local_site_name),
                review_reply_item_mimetype,
                reply)

    def test_get_not_modified(self):
        """Testing the GET review-requests/<id>/reviews/<id>/
        with Not Modified response
        """
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, publish=True)
        reply = self.create_reply(review, publish=True)

        self._testHttpCaching(
            get_review_reply_item_url(reply.base_reply_to, reply.id),
            check_etags=True)

    #
    # HTTP PUT tests
    #

    def setup_basic_put_test(self, user, with_local_site, local_site_name,
                             put_valid_data):
        # Hook for the metaclass-generated PUT tests:
        # returns (url, mimetype, put data, item, check_put_result args).
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user,
            publish=True)
        review = self.create_review(review_request, user=user, publish=True)
        reply = self.create_reply(review, user=user)

        return (get_review_reply_item_url(review, reply.pk, local_site_name),
                review_reply_item_mimetype,
                {'body_top': 'New body top'},
                reply,
                [])

    def check_put_result(self, user, item_rsp, reply, *args):
        self.assertEqual(item_rsp['id'], reply.pk)
        self.assertEqual(item_rsp['body_top'], 'New body top')
        self.assertEqual(item_rsp['body_top_text_type'], 'plain')

        reply = Review.objects.get(pk=reply.pk)
        self.compare_item(item_rsp, reply)

    def test_put_with_publish(self):
        """Testing the PUT
        review-requests/<id>/reviews/<id>/replies/<id>/?public=1 API
        """
        # Publishing a reply should send exactly one notification e-mail.
        self.siteconfig.set('mail_send_review_mail', True)
        self.siteconfig.save()

        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, publish=True)

        mail.outbox = []

        rsp, response = self.api_post_with_response(
            get_review_reply_list_url(review),
            expected_mimetype=review_reply_item_mimetype)

        self.assertIn('Location', response)
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'ok')

        rsp = self.api_put(
            response['Location'],
            {
                'body_top': 'Test',
                'public': True,
            },
            expected_mimetype=review_reply_item_mimetype)

        self.assertEqual(rsp['stat'], 'ok')

        reply = Review.objects.get(pk=rsp['reply']['id'])
        self.assertEqual(reply.public, True)

        self.assertEqual(len(mail.outbox), 1)

    def test_put_with_publish_and_trivial(self):
        """Testing the PUT review-requests/<id>/draft/ API with trivial
        changes
        """
        # A trivial publish must not send any notification e-mail.
        self.siteconfig.set('mail_send_review_mail', True)
        self.siteconfig.save()

        review_request = self.create_review_request(submitter=self.user,
                                                    publish=True)
        review = self.create_review(review_request, publish=True)

        mail.outbox = []

        rsp, response = self.api_post_with_response(
            get_review_reply_list_url(review),
            expected_mimetype=review_reply_item_mimetype)

        self.assertIn('Location', response)
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'ok')

        rsp = self.api_put(
            response['Location'],
            {
                'body_top': 'Test',
                'public': True,
                'trivial': True
            },
            expected_mimetype=review_reply_item_mimetype)

        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertIn('reply', rsp)
        self.assertIn('id', rsp['reply'])

        reply = Review.objects.get(pk=rsp['reply']['id'])
        self.assertTrue(reply.public)

        self.assertEqual(len(mail.outbox), 0)
# Character-controller description consumed through the App.Proxys proxy
# classes.  NOTE(review): the float tables below look machine-generated --
# presumably exported by a tuning tool; prefer regenerating over hand-editing.
from App.Proxys import *

data = IKVMCController(
    name = '',

    # Per-joint PD control parameters: kp/kd gains, torque limit tauMax and a
    # per-axis (x, y, z) scale factor.
    controlParamsList = [
        ControlParams( joint = 'root', kp = 1000.0, kd = 200.0, tauMax = 200.0, scale = ( 1.0, 1.0, 1.0 ) ),
        ControlParams( joint = 'pelvis_lowerback', kp = 75.0, kd = 17.0, tauMax = 100.0, scale = ( 1.0, 1.0, 1.0 ) ),
        ControlParams( joint = 'lowerback_torso', kp = 75.0, kd = 17.0, tauMax = 100.0, scale = ( 1.0, 1.0, 1.0 ) ),
        ControlParams( joint = 'torso_head', kp = 50.0, kd = 15.0, tauMax = 200.0, scale = ( 1.0, 0.2, 1.0 ) ),
        ControlParams( joint = 'lShoulder', kp = 50.0, kd = 15.0, tauMax = 200.0, scale = ( 1.0, 1.0, 1.0 ) ),
        ControlParams( joint = 'rShoulder', kp = 50.0, kd = 15.0, tauMax = 200.0, scale = ( 1.0, 1.0, 1.0 ) ),
        ControlParams( joint = 'lElbow', kp = 50.0, kd = 15.0, tauMax = 200.0, scale = ( 0.2, 1.0, 1.0 ) ),
        ControlParams( joint = 'rElbow', kp = 50.0, kd = 15.0, tauMax = 200.0, scale = ( 0.2, 1.0, 1.0 ) ),
        ControlParams( joint = 'lHip', kp = 300.0, kd = 35.0, tauMax = 200.0, scale = ( 1.0, 1.0, 1.0 ) ),
        ControlParams( joint = 'rHip', kp = 300.0, kd = 35.0, tauMax = 200.0, scale = ( 1.0, 1.0, 1.0 ) ),
        # NOTE(review): no tauMax given for the knees (unlike every other
        # joint) -- presumably falls back to a proxy default; confirm.
        ControlParams( joint = 'lKnee', kp = 300.0, kd = 35.0, scale = ( 1.0, 1.0, 1.0 ) ),
        ControlParams( joint = 'rKnee', kp = 300.0, kd = 35.0, scale = ( 1.0, 1.0, 1.0 ) ),
        ControlParams( joint = 'lAnkle', kp = 50.0, kd = 15.0, tauMax = 100.0, scale = ( 1.0, 0.2, 0.2 ) ),
        ControlParams( joint = 'rAnkle', kp = 50.0, kd = 15.0, tauMax = 100.0, scale = ( 1.0, 0.2, 0.2 ) ),
        ControlParams( joint = 'lToeJoint', kp = 2.0, kd = 0.2, tauMax = 100.0, scale = ( 1.0, 1.0, 1.0 ) ),
        ControlParams( joint = 'rToeJoint', kp = 2.0, kd = 0.2, tauMax = 100.0, scale = ( 1.0, 1.0, 1.0 ) ) ],

    # Single SimBiCon-style state that loops on itself (nextStateIndex = 0).
    # Trajectory points are (phase, value) pairs over the state duration;
    # SWING_/STANCE_ joint names are resolved per stance side, and
    # reverseOnStance mirrors a component for the named stance leg.
    states = [
        SimBiConState(
            name = 'State 0',
            nextStateIndex = 0,
            duration = 0.59,
            externalForces = [ ],
            trajectories = [
                Trajectory( joint = 'root', strength = [ ], components = [
                    TrajectoryComponent( rotationAxis = ( 0.0, 1.0, 0.0 ), reverseOnStance = 'RIGHT', baseTrajectory = [ ( 1.0, 0.0 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ),
                    TrajectoryComponent( rotationAxis = ( 0.0, 0.0, 1.0 ), reverseOnStance = 'RIGHT', baseTrajectory = [ ( 1.0, 0.0 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ),
                    TrajectoryComponent( rotationAxis = ( 1.0, 0.0, 0.0 ), baseTrajectory = [ ( 0.0, 0.0 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ) ] ),
                # Swing hip intentionally has no components here (driven by
                # the IK/VM layer rather than a canned trajectory -- verify).
                Trajectory( joint = 'SWING_Hip', strength = [ ], components = [ ] ),
                Trajectory( joint = 'SWING_Knee', strength = [ ], components = [
                    TrajectoryComponent( rotationAxis = ( 1.0, 0.0, 0.0 ), baseTrajectory = [ ( 0.0, 0.0 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ) ] ),
                Trajectory( joint = 'STANCE_Knee', strength = [ ], components = [
                    TrajectoryComponent( rotationAxis = ( 1.0, 0.0, 0.0 ), baseTrajectory = [ ( 0.0, 0.5675 ), ( 0.25, 0.5675 ), ( 0.5, 0.5675 ), ( 0.75, 0.5675 ), ( 1.0, 0.5675 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ) ] ),
                Trajectory( joint = 'SWING_Ankle', strength = [ ], referenceFrame = 'CHARACTER_RELATIVE', components = [
                    TrajectoryComponent( rotationAxis = ( 1.0, 0.0, 0.0 ), baseTrajectory = [ ( 0.0, 1.19 ), ( 0.25, 0.83453125 ), ( 0.5, 0.340065104167 ), ( 0.75, -0.159971064815 ), ( 1.0, -0.56 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ),
                    TrajectoryComponent( rotationAxis = ( 0.0, 0.0, 1.0 ), baseTrajectory = [ ( 0.0, 0.0 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ) ] ),
                Trajectory( joint = 'STANCE_Ankle', strength = [ ], referenceFrame = 'CHARACTER_RELATIVE', components = [
                    TrajectoryComponent( rotationAxis = ( 1.0, 0.0, 0.0 ), baseTrajectory = [ ( 0.0, -0.56 ), ( 0.25, -0.20453125 ), ( 0.5, 0.289934895833 ), ( 0.75, 0.789971064815 ), ( 1.0, 1.19 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ),
                    TrajectoryComponent( rotationAxis = ( 0.0, 0.0, 1.0 ), reverseOnStance = 'LEFT', baseTrajectory = [ ( 0.0, 0.0 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ) ] ),
                Trajectory( joint = 'SWING_Shoulder', strength = [ ], referenceFrame = 'CHARACTER_RELATIVE', components = [
                    TrajectoryComponent( rotationAxis = ( 1.0, 0.0, 0.0 ), baseTrajectory = [ ( 0.0, 0.4 ), ( 0.25, 0.4 ), ( 0.5, 0.4 ), ( 0.75, 0.4 ), ( 1.0, 0.4 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ),
                    TrajectoryComponent( rotationAxis = ( 0.0, 0.0, 1.0 ), reverseOnStance = 'LEFT', baseTrajectory = [ ( 0.0, -1.5 ), ( 0.25, -1.37826048129 ), ( 0.5, -1.20891768923 ), ( 0.75, -1.03766729788 ), ( 1.0, -0.900666984803 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ),
                    TrajectoryComponent( rotationAxis = ( 1.0, 0.0, 0.0 ), baseTrajectory = [ ( 0.0, -0.651047012036 ), ( 0.25, -0.311105917294 ), ( 0.5, 0.16176086193 ), ( 0.75, 0.639954353465 ), ( 1.0, 1.02250914669 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ) ] ),
                Trajectory( joint = 'STANCE_Shoulder', strength = [ ], referenceFrame = 'CHARACTER_RELATIVE', components = [
                    TrajectoryComponent( rotationAxis = ( 1.0, 0.0, 0.0 ), baseTrajectory = [ ( 0.0, 0.4 ), ( 0.25, 0.4 ), ( 0.5, 0.4 ), ( 0.75, 0.4 ), ( 1.0, 0.4 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ),
                    TrajectoryComponent( rotationAxis = ( 0.0, 0.0, 1.0 ), reverseOnStance = 'LEFT', baseTrajectory = [ ( 0.0, 0.900666984803 ), ( 0.25, 1.02240650351 ), ( 0.5, 1.19174929557 ), ( 0.75, 1.36299968692 ), ( 1.0, 1.5 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ),
                    TrajectoryComponent( rotationAxis = ( 1.0, 0.0, 0.0 ), baseTrajectory = [ ( 0.0, 1.02250914669 ), ( 0.25, 0.682568051951 ), ( 0.5, 0.209701272727 ), ( 0.75, -0.268492218808 ), ( 1.0, -0.651047012036 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ) ] ),
                Trajectory( joint = 'STANCE_Elbow', strength = [ ], components = [
                    TrajectoryComponent( rotationAxis = ( 0.0, 1.0, 0.0 ), reverseOnStance = 'LEFT', baseTrajectory = [ ( 0.0, 1.47145472144 ), ( 0.25, 1.44865469994 ), ( 0.5, 1.41693928543 ), ( 0.75, 1.38486660562 ), ( 1.0, 1.35920846178 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ) ] ),
                Trajectory( joint = 'SWING_Elbow', strength = [ ], components = [
                    TrajectoryComponent( rotationAxis = ( 0.0, 1.0, 0.0 ), reverseOnStance = 'LEFT', baseTrajectory = [ ( 0.0, -1.35920846178 ), ( 0.25, -1.38200848327 ), ( 0.5, -1.41372389778 ), ( 0.75, -1.44579657759 ), ( 1.0, -1.47145472144 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ) ] ),
                Trajectory( joint = 'pelvis_lowerback', strength = [ ], referenceFrame = 'CHARACTER_RELATIVE', components = [
                    TrajectoryComponent( rotationAxis = ( 0.0, 1.0, 0.0 ), reverseOnStance = 'RIGHT', baseTrajectory = [ ( 0.0, 0.0 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ),
                    TrajectoryComponent( rotationAxis = ( 0.0, 0.0, 1.0 ), reverseOnStance = 'RIGHT', baseTrajectory = [ ( 0.0, -0.155403232618 ), ( 0.25, -0.092270669367 ), ( 0.5, -0.00445165510104 ), ( 0.75, 0.0843566158541 ), ( 1.0, 0.155403232618 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ),
                    TrajectoryComponent( rotationAxis = ( 1.0, 0.0, 0.0 ), baseTrajectory = [ ( 0.0, -0.0353280996748 ), ( 0.25, -0.0353280996748 ), ( 0.5, -0.0353280996748 ), ( 0.75, -0.0353280996748 ), ( 1.0, -0.0353280996748 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ) ] ),
                Trajectory( joint = 'lowerback_torso', strength = [ ], referenceFrame = 'CHARACTER_RELATIVE', components = [
                    TrajectoryComponent( rotationAxis = ( 0.0, 1.0, 0.0 ), reverseOnStance = 'RIGHT', baseTrajectory = [ ( 0.0, 0.0 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ),
                    TrajectoryComponent( rotationAxis = ( 0.0, 0.0, 1.0 ), reverseOnStance = 'RIGHT', baseTrajectory = [ ( 0.0, -0.220856401634 ), ( 0.25, -0.13113348847 ), ( 0.5, -0.00632661567182 ), ( 0.75, 0.119886171721 ), ( 1.0, 0.220856401634 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ),
                    TrajectoryComponent( rotationAxis = ( 1.0, 0.0, 0.0 ), baseTrajectory = [ ( 0.0, 0.0408453732455 ), ( 0.25, 0.0408453732455 ), ( 0.5, 0.0408453732455 ), ( 0.75, 0.0408453732455 ), ( 1.0, 0.0408453732455 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ) ] ),
                Trajectory( joint = 'torso_head', strength = [ ], referenceFrame = 'CHARACTER_RELATIVE', components = [
                    TrajectoryComponent( rotationAxis = ( 0.0, 1.0, 0.0 ), reverseOnStance = 'RIGHT', baseTrajectory = [ ( 0.0, 0.0 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ),
                    TrajectoryComponent( rotationAxis = ( 0.0, 0.0, 1.0 ), reverseOnStance = 'RIGHT', baseTrajectory = [ ( 0.0, -0.133448913926 ), ( 0.25, -0.0792352926433 ), ( 0.5, -0.00382275534683 ), ( 0.75, 0.0724392831378 ), ( 1.0, 0.133448913926 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ),
                    TrajectoryComponent( rotationAxis = ( 1.0, 0.0, 0.0 ), baseTrajectory = [ ( 0.0, 0.0 ), ( 0.25, 0.0 ), ( 0.5, 0.0 ), ( 0.75, 0.0 ), ( 1.0, 0.0 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ) ] ),
                # The swing toe is the only trajectory with a non-empty
                # strength ramp (phase, strength) -- weak early in swing.
                Trajectory( joint = 'SWING_ToeJoint', strength = [ ( 0.3, 0.1 ), ( 0.5, 0.1 ), ( 0.6, 1.0 ) ], components = [
                    TrajectoryComponent( rotationAxis = ( 1.0, 0.0, 0.0 ), baseTrajectory = [ ( 0.0, 0.0 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ) ] ),
                Trajectory( joint = 'STANCE_ToeJoint', strength = [ ], components = [
                    TrajectoryComponent( rotationAxis = ( 1.0, 0.0, 0.0 ), baseTrajectory = [ ( 0.0, 0.0 ) ], dScaledTrajectory = [ ], vScaledTrajectory = [ ] ) ] ) ] ) ],

    # Whole-body reference trajectories over the gait phase (phase, value).
    sagittalTrajectory = [ ( 0.0, 0.0 ), ( 0.25, -0.1375 ), ( 0.5, -0.248046875 ), ( 0.75, 0.0854166666667 ), ( 1.0, 0.0 ) ],
    coronalTrajectory = [ ( 0.0, 0.0 ), ( 0.25, 0.0 ), ( 0.5, 0.0 ), ( 0.75, 0.0 ), ( 1.0, 0.0 ) ],
    heightTrajectory = [ ( 0.0, 0.0 ), ( 0.25, -0.03125 ), ( 0.5, 0.00078125 ), ( 0.75, 0.177083333333 ), ( 1.0, 0.0 ) ] )
import os
import sys
import copy
import AutoGemmParameters
import Common
import KernelParameters


def indent(il):
  """Return `il` levels of indentation for the generated C++ source."""
  returnTabs = ""
  for i in range(0, il):
    returnTabs += " "
  return returnTabs


def tileInRange( tileMin, tileMax, rangeMin, rangeMax):
  """Return True when the tile size span [tileMin, tileMax] covers the
  size range [rangeMin, rangeMax].

  tileMax < 0 means "no upper bound"; rangeMax <= 0 likewise marks an
  unbounded range, in which case only an unbounded tile is acceptable.
  """
  return ( tileMax < 0 or (tileMax >= rangeMax and rangeMax > 0) ) \
      and tileMin <= rangeMin


################################################################################
# KSL - Kernel Selection Logic File
################################################################################
class KernelSelection:
  """Generates AutoGemmKernelSelection.h/.cpp: size-based kernel selection.

  The generated C++ chooses a GEMM kernel from (precision, order, transA,
  transB, beta, problem size), preferring "valid" tiles whose exact-multiple
  requirements hold and falling back to a generic tile otherwise.
  """

  ##############################################################################
  # KSL - default constructor
  ##############################################################################
  def __init__( \
      self, \
      precisionList, \
      orderList, \
      transDict, \
      betaList, \
      unrollDict, \
      kernelSelectionData):
    # Output files: a header with the template declaration and a .cpp with
    # one explicit specialization per precision.
    self.incFileName = Common.getIncludePath() + "AutoGemmKernelSelection.h"
    self.incFile = open(self.incFileName, "w")
    self.incFile.write( Common.getAutoGemmHeader() )
    self.kernelSelectionFileName = Common.getIncludePath() \
        + "AutoGemmKernelSelection.cpp"
    self.selectionFile = open(self.kernelSelectionFileName, "w")
    self.selectionFile.write( Common.getAutoGemmHeader() )

    # Header contents: includes plus the gemmSelectKernel<> declaration.
    self.inc = (
        "#include <clBLAS.h>\n"
        "#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelSources.h\"\n"
        "#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelBinaries.h\"\n"
        "#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelBuildOptionsSource.h\"\n"
        "#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelBuildOptionsBinary.h\"\n"
        "#include \"" + Common.getRelativeIncludePath() + "AutoGemmClKernels.h\"\n"
        "\n"
        "#define EXACT_MULTIPLES(MULTIPLE_STR) MULTIPLE_STR\n"
        "\n"
        "// kernel selection logic template\n"
        "template<typename Precision>\n"
        "void gemmSelectKernel(\n"
        " clblasOrder order,\n"
        " clblasTranspose transA,\n"
        " clblasTranspose transB,\n"
        " size_t M,\n"
        " size_t N,\n"
        " size_t K,\n"
        " bool betaNonZero,\n"
        " float optimalNumElementsPerWorkItem,\n"
        " const char **tileKernelSource,\n"
        " const char **rowKernelSource,\n"
        " const char **colKernelSource,\n"
        " const char **cornerKernelSource,\n"
        " const char **sourceBuildOptions,\n"
        " const unsigned char **tileKernelBinary,\n"
        " const unsigned char **rowKernelBinary,\n"
        " const unsigned char **colKernelBinary,\n"
        " const unsigned char **cornerKernelBinary,\n"
        " size_t **tileKernelBinarySize,\n"
        " size_t **rowKernelBinarySize,\n"
        " size_t **colKernelBinarySize,\n"
        " size_t **cornerKernelBinarySize,\n"
        " const char **binaryBuildOptions,\n"
        " cl_kernel **tileClKernel,\n"
        " cl_kernel **rowClKernel,\n"
        " cl_kernel **colClKernel,\n"
        " cl_kernel **cornerClKernel,\n"
        " unsigned int *workGroupNumRows,\n"
        " unsigned int *workGroupNumCols,\n"
        " unsigned int *microTileNumRows,\n"
        " unsigned int *microTileNumCols,\n"
        " unsigned int *unroll\n"
        ");\n\n" )

    self.logic = "#include \"" + Common.getRelativeIncludePath() \
        + "AutoGemmKernelSelection.h\"\n"

    ####################################
    # precision
    kernel = KernelParameters.KernelParameters()
    for precision in precisionList:
      kernel.precision = precision
      sizeEvents = kernelSelectionData[precision]
      self.logic += (
          "\n// " + precision + "gemm kernel selection logic\n"
          "template<>\n"
          "void gemmSelectKernel<" )
      if precision == "s":
        self.logic += "float"
      elif precision == "d":
        self.logic += "double"
      elif precision == "c":
        self.logic += "FloatComplex"
      else:
        self.logic += "DoubleComplex"
      self.logic += (
          ">(\n"
          " clblasOrder order,\n"
          " clblasTranspose transA,\n"
          " clblasTranspose transB,\n"
          " size_t M,\n"
          " size_t N,\n"
          " size_t K,\n"
          " bool betaNonZero,\n"
          " float optimalNumElementsPerWorkItem,\n"
          " const char **tileKernelSource,\n"
          " const char **rowKernelSource,\n"
          " const char **colKernelSource,\n"
          " const char **cornerKernelSource,\n"
          " const char **sourceBuildOptions,\n"
          " const unsigned char **tileKernelBinary,\n"
          " const unsigned char **rowKernelBinary,\n"
          " const unsigned char **colKernelBinary,\n"
          " const unsigned char **cornerKernelBinary,\n"
          " size_t **tileKernelBinarySize,\n"
          " size_t **rowKernelBinarySize,\n"
          " size_t **colKernelBinarySize,\n"
          " size_t **cornerKernelBinarySize,\n"
          " const char **binaryBuildOptions,\n"
          " cl_kernel **tileClKernel,\n"
          " cl_kernel **rowClKernel,\n"
          " cl_kernel **colClKernel,\n"
          " cl_kernel **cornerClKernel,\n"
          " unsigned int *workGroupNumRows,\n"
          " unsigned int *workGroupNumCols,\n"
          " unsigned int *microTileNumRows,\n"
          " unsigned int *microTileNumCols,\n"
          " unsigned int *unroll\n"
          ") {\n" )

      ####################################
      # order
      for order in orderList:
        kernel.order = order
        self.logic += indent(1) + "if (order == " + order + ") {\n"
        transList = transDict[precision]

        ####################################
        # transA
        for transA in transList:
          kernel.transA = transA
          self.logic += indent(2) + "if (transA == "
          if transA == "N":
            self.logic += "clblasNoTrans"
          elif transA == "T":
            self.logic += "clblasTrans"
          else:
            self.logic += "clblasConjTrans"
          self.logic += ") {\n"

          ####################################
          # transB
          for transB in transList:
            kernel.transB = transB
            self.logic += indent(3) + "if (transB == "
            if transB == "N":
              self.logic += "clblasNoTrans"
            elif transB == "T":
              self.logic += "clblasTrans"
            else:
              self.logic += "clblasConjTrans"
            self.logic += ") {\n"

            ####################################
            # beta
            for beta in betaList:
              kernel.beta = beta
              self.logic += indent(4) + "if ( "
              if beta == 0:
                self.logic += "!betaNonZero"
              else:
                self.logic += "betaNonZero"
              self.logic += " ) {\n"

              ####################################
              # if size event: flush accumulated logic periodically to keep
              # the in-memory string small.
              for sizeEvent in sizeEvents:
                self.selectionFile.write( self.logic )
                self.logic = ""
                sizeMin = sizeEvent[0]
                fallbackTile = sizeEvent[1]
                validTiles = sizeEvent[2]
                self.logic += indent(5) + "if ( M*N >= " + str(sizeMin) \
                    + "*" + str(sizeMin) + ") {\n"

                ####################################
                # valid tiles: require M, N and K to be exact multiples.
                self.logic += indent(6)+"// valid tiles\n"
                for tileParams in validTiles:
                  kernel.workGroupNumRows = tileParams[0]
                  kernel.workGroupNumCols = tileParams[1]
                  kernel.microTileNumRows = tileParams[2]
                  kernel.microTileNumCols = tileParams[3]
                  kernel.macroTileNumRows = kernel.workGroupNumRows*kernel.microTileNumRows
                  kernel.macroTileNumCols = kernel.workGroupNumCols*kernel.microTileNumCols
                  for unroll in unrollDict[precision]:
                    kernel.unroll = unroll
                    self.logic += indent(6)+"if ( M%%%d == 0 && N%%%d == 0 && K%%%d == 0) {\n" \
                        % (kernel.getMultipleM(), kernel.getMultipleN(), kernel.getMultipleK())
                    self.addBodyForKernel( kernel )
                    self.logic += indent(6) + "}\n"

                ####################################
                # fallback tile: only K must be an exact multiple.
                self.logic += indent(6)+"// fallback tile\n"
                kernel.workGroupNumRows = fallbackTile[0]
                kernel.workGroupNumCols = fallbackTile[1]
                kernel.microTileNumRows = fallbackTile[2]
                kernel.microTileNumCols = fallbackTile[3]
                kernel.macroTileNumRows = kernel.workGroupNumRows*kernel.microTileNumRows
                kernel.macroTileNumCols = kernel.workGroupNumCols*kernel.microTileNumCols
                for unroll in unrollDict[precision]:
                  kernel.unroll = unroll
                  self.logic += indent(6)+"if ( K%%%d == 0 ) {\n" \
                      % (kernel.getMultipleK())
                  self.addBodyForKernel( kernel )
                  self.logic += indent(6) + "}\n"

                ####################################
                # end size event
                self.logic += indent(5) + "} // end size\n"

              ####################################
              # end beta
              self.logic += indent(4) + "} // end beta\n"

            ####################################
            # end transB
            self.logic += indent(3) + "} // end transB\n"

          ####################################
          # end transA
          self.logic += indent(2) + "} // end transA\n"

        ####################################
        # end order
        self.logic += indent(1) + "} // end order\n"

      ####################################
      # end precision
      self.logic += indent(0) + "} // end precision function\n"

    # write last precision
    self.selectionFile.write( self.logic )
    self.selectionFile.write( "\n" )

  def addBodyForKernel( self, kernel ):
    """Append the C++ statements that select `kernel`: wire up its source,
    binary, build options, cl_kernel handles and tile dimensions, then
    return."""
    self.logic += indent(7) + "*tileKernelSource = " + kernel.getName() + "_src;\n"
    self.logic += indent(7) + "*rowKernelSource = " + kernel.getRowName() + "_src;\n"
    self.logic += indent(7) + "*colKernelSource = " + kernel.getColName() + "_src;\n"
    self.logic += indent(7) + "*cornerKernelSource = " + kernel.getCornerName() + "_src;\n"
    self.logic += indent(7) + "*sourceBuildOptions = " + kernel.getName() + "_srcBuildOptions;\n"
    self.logic += indent(7) + "*tileKernelBinary = " + kernel.getName() + "_bin;\n"
    self.logic += indent(7) + "*rowKernelBinary = " + kernel.getRowName() + "_bin;\n"
    self.logic += indent(7) + "*colKernelBinary = " + kernel.getColName() + "_bin;\n"
    self.logic += indent(7) + "*cornerKernelBinary = " + kernel.getCornerName() + "_bin;\n"
    self.logic += indent(7) + "*tileKernelBinarySize = &" + kernel.getName() + "_binSize;\n"
    self.logic += indent(7) + "*rowKernelBinarySize = &" + kernel.getRowName() + "_binSize;\n"
    self.logic += indent(7) + "*colKernelBinarySize = &" + kernel.getColName() + "_binSize;\n"
    self.logic += indent(7) + "*cornerKernelBinarySize = &" + kernel.getCornerName() + "_binSize;\n"
    self.logic += indent(7) + "*binaryBuildOptions = " + kernel.getName() + "_binBuildOptions;\n"
    self.logic += indent(7) + "*tileClKernel = &" + kernel.getName() + "_clKernel;\n"
    self.logic += indent(7) + "*rowClKernel = &" + kernel.getRowName() + "_clKernel;\n"
    self.logic += indent(7) + "*colClKernel = &" + kernel.getColName() + "_clKernel;\n"
    self.logic += indent(7) + "*cornerClKernel = &" + kernel.getCornerName() + "_clKernel;\n"
    self.logic += indent(7) + "*workGroupNumRows = " + kernel.getName() + "_workGroupNumRows;\n"
    self.logic += indent(7) + "*workGroupNumCols = " + kernel.getName() + "_workGroupNumCols;\n"
    self.logic += indent(7) + "*microTileNumRows = " + kernel.getName() + "_microTileNumRows;\n"
    # BUG FIX: previously emitted "..._microTileNumRows" here, so the
    # generated C++ assigned the row count to *microTileNumCols (see the
    # correct handling in KernelSelectionSpecific.newKernel).
    self.logic += indent(7) + "*microTileNumCols = " + kernel.getName() + "_microTileNumCols;\n"
    self.logic += indent(7) + "*unroll = " + kernel.getName() + "_unroll;\n"
    self.logic += indent(7) + "return;\n"

  ##############################################################################
  # KSL - write to file
  ##############################################################################
  def writeToFile(self):
    """Flush and close the generated .cpp and write/close the header."""
    self.selectionFile.close()
    self.incFile.write( self.inc )
    self.incFile.close()


################################################################################
# KSM - Kernel Selection Manual/Specific File
################################################################################
class KernelSelectionSpecific:
  """Generates AutoGemmKernelSelectionSpecific.h/.cpp: lookup of one exact
  kernel from (precision, order, transA/B, beta, macro tile, unroll).

  Callers drive it as a streaming state machine (newPrecision/newOrder/
  newTrans/newBeta/newKernel); the *Initialized flags track which C++
  blocks are currently open so the right closing braces get emitted.
  """
  zeroIndent = " "
  tab = " "

  ##############################################################################
  # KSL - default constructor
  ##############################################################################
  def __init__(self):
    self.incFileName = Common.getIncludePath() \
        + "AutoGemmKernelSelectionSpecific.h"
    self.incFile = open(self.incFileName, "w")
    self.incFile.write( Common.getAutoGemmHeader() )
    self.kernelSelectionFileName = Common.getIncludePath() \
        + "AutoGemmKernelSelectionSpecific.cpp"
    self.selectionFile = open(self.kernelSelectionFileName, "w")
    self.selectionFile.write( Common.getAutoGemmHeader() )

    self.inc = (
        "#include <clBLAS.h>\n"
        "#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelSources.h\"\n"
        "#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelBinaries.h\"\n"
        "#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelBuildOptionsSource.h\"\n"
        "#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelBuildOptionsBinary.h\"\n"
        "#include \"" + Common.getRelativeIncludePath() + "AutoGemmClKernels.h\"\n"
        "\n"
        "// kernel selection specific template\n"
        "template<typename Precision>\n"
        "bool gemmSelectKernelSpecific(\n"
        " clblasOrder order,\n"
        " clblasTranspose transA,\n"
        " clblasTranspose transB,\n"
        " bool betaNonZero,\n"
        " unsigned int macroTileNumRows,\n"
        " unsigned int macroTileNumCols,\n"
        " unsigned int unroll,\n"
        " const char **tileKernelSource,\n"
        " const char **rowKernelSource,\n"
        " const char **colKernelSource,\n"
        " const char **cornerKernelSource,\n"
        " const char **sourceBuildOptions,\n"
        " const unsigned char **tileKernelBinary,\n"
        " const unsigned char **rowKernelBinary,\n"
        " const unsigned char **colKernelBinary,\n"
        " const unsigned char **cornerKernelBinary,\n"
        " size_t **tileKernelBinarySize,\n"
        " size_t **rowKernelBinarySize,\n"
        " size_t **colKernelBinarySize,\n"
        " size_t **cornerKernelBinarySize,\n"
        " const char **binaryBuildOptions,\n"
        " cl_kernel **tileClKernel,\n"
        " cl_kernel **rowClKernel,\n"
        " cl_kernel **colClKernel,\n"
        " cl_kernel **cornerClKernel,\n"
        " unsigned int *workGroupNumRows,\n"
        " unsigned int *workGroupNumCols,\n"
        " unsigned int *microTileNumRows,\n"
        " unsigned int *microTileNumCols\n"
        ");\n\n" )

    self.logic = "#include \"" + Common.getRelativeIncludePath() \
        + "AutoGemmKernelSelectionSpecific.h\"\n"
    self.precisionInitialized = False
    self.orderInitialized = False
    self.transInitialized = False
    self.betaInitialized = False

  def newPrecision(self, precision ):
    """Close the previous precision's specialization (if any) and open a new
    gemmSelectKernelSpecific<> specialization for `precision`."""
    if self.precisionInitialized:
      self.logic += self.zeroIndent+self.tab+self.tab + "}\n" # 2 tabs
      self.logic += self.zeroIndent+self.tab + "}\n" # 1 tab
      self.logic += self.zeroIndent+"}\n"
      self.logic += self.zeroIndent + "return false; // didn't find a match\n"
      self.logic += "}\n\n"
    else:
      self.logic += self.zeroIndent
    self.logic += (
        "\n// " + precision + "gemm kernel selection specific\n"
        "template<>\n"
        "bool gemmSelectKernelSpecific<" )
    if precision == "s":
      self.logic += "float"
    elif precision == "d":
      self.logic += "double"
    elif precision == "c":
      self.logic += "FloatComplex"
    else:
      self.logic += "DoubleComplex"
    self.logic += (
        ">(\n"
        " clblasOrder order,\n"
        " clblasTranspose transA,\n"
        " clblasTranspose transB,\n"
        " bool betaNonZero,\n"
        " unsigned int macroTileNumRows,\n"
        " unsigned int macroTileNumCols,\n"
        " unsigned int unroll,\n"
        " const char **tileKernelSource,\n"
        " const char **rowKernelSource,\n"
        " const char **colKernelSource,\n"
        " const char **cornerKernelSource,\n"
        " const char **sourceBuildOptions,\n"
        " const unsigned char **tileKernelBinary,\n"
        " const unsigned char **rowKernelBinary,\n"
        " const unsigned char **colKernelBinary,\n"
        " const unsigned char **cornerKernelBinary,\n"
        " size_t **tileKernelBinarySize,\n"
        " size_t **rowKernelBinarySize,\n"
        " size_t **colKernelBinarySize,\n"
        " size_t **cornerKernelBinarySize,\n"
        " const char **binaryBuildOptions,\n"
        " cl_kernel **tileClKernel,\n"
        " cl_kernel **rowClKernel,\n"
        " cl_kernel **colClKernel,\n"
        " cl_kernel **cornerClKernel,\n"
        " unsigned int *workGroupNumRows,\n"
        " unsigned int *workGroupNumCols,\n"
        " unsigned int *microTileNumRows,\n"
        " unsigned int *microTileNumCols\n"
        ") {\n" )
    self.precisionInitialized = True
    self.orderInitialized = False
    self.transInitialized = False
    self.betaInitialized = False

  ####################################
  # KSL - new order
  def newOrder(self, order):
    """Close the previous order's block (if any) and open one for `order`."""
    if (self.orderInitialized):
      self.logic += self.zeroIndent+self.tab+self.tab + "}\n" # 2 tabs
      self.logic += self.zeroIndent+self.tab + "}\n" # 1 tab
      self.logic += self.zeroIndent
      self.logic += "} else "
    else:
      self.logic += self.zeroIndent
    self.logic += "if (order == " + order + ") {\n"
    self.orderInitialized = True
    self.transInitialized = False
    self.betaInitialized = False

  ####################################
  # KSL - new trans
  def newTrans(self, transA, transB):
    """Close the previous (transA, transB) block (if any) and open one for
    the given transpose pair."""
    if (self.transInitialized):
      self.logic += self.zeroIndent+self.tab+self.tab + "}\n" # 2 tabs
      self.logic += self.zeroIndent+self.tab # 1 tab
      self.logic += "} else "
    else:
      self.logic += self.zeroIndent+self.tab # 1 tabs
    self.logic += "if (transA == "
    if transA == "N":
      self.logic += "clblasNoTrans"
    elif transA == "T":
      self.logic += "clblasTrans"
    else:
      self.logic += "clblasConjTrans"
    self.logic += " && transB == "
    if transB == "N":
      self.logic += "clblasNoTrans"
    elif transB == "T":
      self.logic += "clblasTrans"
    else:
      self.logic += "clblasConjTrans"
    self.logic += ") {\n"
    self.transInitialized = True
    self.betaInitialized = False

  ####################################
  # KSL - new beta
  def newBeta(self, beta):
    """Open (or chain onto) the beta-zero / beta-nonzero branch."""
    if (self.betaInitialized):
      self.logic += self.zeroIndent+self.tab+self.tab # 2 tabs
      self.logic += "} else "
    else:
      self.logic += self.zeroIndent+self.tab+self.tab # 2 tabs
    self.logic += "if ( "
    if beta == 0:
      self.logic += "!betaNonZero"
    else:
      self.logic += "betaNonZero"
    self.logic += " ) {\n"
    self.betaInitialized = True

  ##############################################################################
  # KSL - add new kernel
  ##############################################################################
  def newKernel(self, kernel):
    """Emit the exact-match branch for `kernel` (macro tile + unroll test),
    wiring up all outputs, then flush the accumulated text to disk."""
    # new kernel
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab # 3 tabs
    self.logic += ("if ( macroTileNumRows == %u && macroTileNumCols == %u "
        "&& unroll == %u) {\n") \
        % ( kernel.macroTileNumRows, kernel.macroTileNumCols, kernel.unroll )
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*tileKernelSource = " + kernel.getName() + "_src;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*rowKernelSource = " + kernel.getRowName() + "_src;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*colKernelSource = " + kernel.getColName() + "_src;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*cornerKernelSource = " + kernel.getCornerName() + "_src;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*sourceBuildOptions = " + kernel.getName() + "_srcBuildOptions;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*tileKernelBinary = " + kernel.getName() + "_bin;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*rowKernelBinary = " + kernel.getRowName() + "_bin;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*colKernelBinary = " + kernel.getColName() + "_bin;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*cornerKernelBinary = " + kernel.getCornerName() + "_bin;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*tileKernelBinarySize = &" + kernel.getName() + "_binSize;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*rowKernelBinarySize = &" + kernel.getRowName() + "_binSize;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*colKernelBinarySize = &" + kernel.getColName() + "_binSize;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*cornerKernelBinarySize = &" + kernel.getCornerName() + "_binSize;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*binaryBuildOptions = " + kernel.getName() + "_binBuildOptions;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*tileClKernel = &" + kernel.getName() + "_clKernel;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*rowClKernel = &" + kernel.getRowName() + "_clKernel;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*colClKernel = &" + kernel.getColName() + "_clKernel;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*cornerClKernel = &" + kernel.getCornerName() + "_clKernel;\n"
    # dims
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*workGroupNumRows = " + kernel.getName() + "_workGroupNumRows;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*workGroupNumCols = " + kernel.getName() + "_workGroupNumCols;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*microTileNumRows = " + kernel.getName() + "_microTileNumRows;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "*microTileNumCols = " + kernel.getName() + "_microTileNumCols;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab+self.tab # 4 tabs
    self.logic += "return true;\n"
    self.logic += self.zeroIndent+self.tab+self.tab+self.tab # 3 tabs
    self.logic += "}\n"
    # flush to disk so self.logic stays small
    self.selectionFile.write( self.logic )
    self.logic = ""

  ##############################################################################
  # KSL - write to file
  ##############################################################################
  def writeToFile(self):
    """Close the last open blocks, finish the function and both files."""
    self.logic += self.zeroIndent+self.tab+self.tab + "}\n" # 2 tabs
    self.logic += self.zeroIndent+self.tab + "}\n" # 1 tab
    self.logic += self.zeroIndent + "}\n" # 0 tab
    self.logic += self.zeroIndent + "return false; // didn't find a match\n"
    self.logic += "}\n" # close function
    self.selectionFile.write(self.logic)
    self.selectionFile.write("\n")
    self.selectionFile.close()
    self.incFile.write(self.inc)
    self.incFile.write("\n")
    self.incFile.close()


################################################################################
# Main
################################################################################
def writeKernelSelection():
  """Generate both kernel-selection source files into the include path."""
  print("AutoGemm.py: Generating kernel selection.")
  if not os.path.exists( Common.getIncludePath() ):
    os.makedirs( Common.getIncludePath() )

  ########################################
  # kernel selection specific
  kss = KernelSelectionSpecific()
  # for each precision
  kernel = KernelParameters.KernelParameters()
  for precision in AutoGemmParameters.precisions:
    kernel.precision = precision
    kss.newPrecision(precision)
    # valid tiles for this precision
    tiles = AutoGemmParameters.getTilesForPrecision(precision)
    # for non tile parameters
    for order in AutoGemmParameters.orders:
      kernel.order = order
      kss.newOrder(order)
      for transA in AutoGemmParameters.transposes[precision]:
        kernel.transA = transA
        for transB in AutoGemmParameters.transposes[precision]:
          kernel.transB = transB
          kss.newTrans(transA, transB)
          for beta in AutoGemmParameters.betas:
            kernel.beta = beta
            kss.newBeta(beta)
            # for tile parameters
            for tile in tiles:
              kernel.useTile(tile)
              kss.newKernel(kernel)
  kss.writeToFile()

  ########################################
  # kernel selection
  ks = KernelSelection( \
      AutoGemmParameters.precisions, \
      AutoGemmParameters.orders, \
      AutoGemmParameters.transposes, \
      AutoGemmParameters.betas, \
      AutoGemmParameters.unrolls, \
      AutoGemmParameters.kernelSelectionData )
  ks.writeToFile()


################################################################################
# Main
################################################################################
if __name__ == "__main__":
  if len(sys.argv) == 2:
    Common.setOutputPath(sys.argv[1])
  else:
    print("Warning: No output path specified; default is working directory.")
  writeKernelSelection()
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Submatrix-wise Vector Embedding Learner.

Implementation of SwiVel algorithm described at:
http://arxiv.org/abs/1602.02215

This program expects an input directory that contains the following files.

  row_vocab.txt, col_vocab.txt

    The row and column vocabulary files.  Each file should contain one token
    per line; these will be used to generate a tab-separated file containing
    the trained embeddings.

  row_sums.txt, col_sums.txt

    The matrix row and column marginal sums.  Each file should contain one
    decimal floating point number per line which corresponds to the marginal
    count of the matrix for that row or column.

  shards.recs

    A file containing the sub-matrix shards, stored as TFRecords.  Each shard
    is expected to be a serialized tf.Example protocol buffer with the
    following properties:

      global_row: the global row indices contained in the shard
      global_col: the global column indices contained in the shard
      sparse_local_row, sparse_local_col, sparse_value: three parallel arrays
      that are a sparse representation of the submatrix counts.

It will generate embeddings, training from the input directory for the
specified number of epochs.  When complete, it will output the trained vectors
to a tab-separated file that contains one line per embedding.  Row and column
embeddings are stored in separate files.

"""

import argparse
import glob
import math
import os
import sys
import time
import threading

import numpy as np
import tensorflow as tf

flags = tf.app.flags

flags.DEFINE_string('input_base_path', '/tmp/swivel_data',
                    'Directory containing input shards, vocabularies, '
                    'and marginals.')
flags.DEFINE_string('output_base_path', '/tmp/swivel_data',
                    'Path where to write the trained embeddings.')
flags.DEFINE_integer('embedding_size', 300, 'Size of the embeddings')
flags.DEFINE_boolean('trainable_bias', False, 'Biases are trainable')
flags.DEFINE_integer('submatrix_rows', 4096,
                     'Rows in each training submatrix. '
                     'This must match the training data.')
flags.DEFINE_integer('submatrix_cols', 4096,
                     'Rows in each training submatrix. '
                     'This must match the training data.')
flags.DEFINE_float('loss_multiplier', 1.0 / 4096,
                   'constant multiplier on loss.')
flags.DEFINE_float('confidence_exponent', 0.5,
                   'Exponent for l2 confidence function')
flags.DEFINE_float('confidence_scale', 0.25,
                   'Scale for l2 confidence function')
flags.DEFINE_float('confidence_base', 0.1, 'Base for l2 confidence function')
flags.DEFINE_float('learning_rate', 1.0, 'Initial learning rate')
flags.DEFINE_integer('num_concurrent_steps', 2,
                     'Number of threads to train with')
flags.DEFINE_float('num_epochs', 40, 'Number epochs to train for')
flags.DEFINE_float('per_process_gpu_memory_fraction', 0.25,
                   'Fraction of GPU memory to use')

FLAGS = flags.FLAGS


def embeddings_with_init(vocab_size, embedding_dim, name):
  """Creates and initializes the embedding tensors.

  Returns a [vocab_size, embedding_dim] variable initialized from a normal
  distribution whose stddev shrinks with the embedding dimension.
  """
  return tf.get_variable(name=name,
                         shape=[vocab_size, embedding_dim],
                         initializer=tf.random_normal_initializer(
                             stddev=math.sqrt(1.0 / embedding_dim)))


def count_matrix_input(filenames, submatrix_rows, submatrix_cols):
  """Reads submatrix shards from disk.

  Builds an input pipeline: each file in `filenames` is read whole, parsed as
  a tf.Example, and its sparse count triples are densified into a
  [submatrix_rows, submatrix_cols] count matrix.  Returns the queued
  (global_row, global_col, count) tensors for one submatrix per step.
  """
  filename_queue = tf.train.string_input_producer(filenames)
  reader = tf.WholeFileReader()
  _, serialized_example = reader.read(filename_queue)
  features = tf.parse_single_example(
      serialized_example,
      features={
          'global_row': tf.FixedLenFeature([submatrix_rows], dtype=tf.int64),
          'global_col': tf.FixedLenFeature([submatrix_cols], dtype=tf.int64),
          'sparse_local_row': tf.VarLenFeature(dtype=tf.int64),
          'sparse_local_col': tf.VarLenFeature(dtype=tf.int64),
          'sparse_value': tf.VarLenFeature(dtype=tf.float32)
      })

  global_row = features['global_row']
  global_col = features['global_col']

  sparse_local_row = features['sparse_local_row'].values
  sparse_local_col = features['sparse_local_col'].values
  sparse_count = features['sparse_value'].values

  # Pair up the parallel row/col index arrays into (row, col) coordinates and
  # scatter the counts into a dense submatrix.
  sparse_indices = tf.concat(1, [tf.expand_dims(sparse_local_row, 1),
                                 tf.expand_dims(sparse_local_col, 1)])
  count = tf.sparse_to_dense(sparse_indices,
                             [submatrix_rows, submatrix_cols],
                             sparse_count)

  queued_global_row, queued_global_col, queued_count = tf.train.batch(
      [global_row, global_col, count],
      batch_size=1,
      num_threads=4,
      capacity=32)

  # batch_size is 1, so strip the leading batch dimension again.
  queued_global_row = tf.reshape(queued_global_row, [submatrix_rows])
  queued_global_col = tf.reshape(queued_global_col, [submatrix_cols])
  queued_count = tf.reshape(queued_count, [submatrix_rows, submatrix_cols])

  return queued_global_row, queued_global_col, queued_count


def read_marginals_file(filename):
  """Reads text file with one number per line to an array."""
  with open(filename) as lines:
    return [float(line) for line in lines]


def write_embedding_tensor_to_disk(vocab_path, output_path, sess, embedding):
  """Writes tensor to output_path as tsv.

  Each output line is the vocabulary token followed by its tab-separated
  embedding values; row order follows the vocabulary file order.
  """
  # Fetch the embedding values from the model
  embeddings = sess.run(embedding)

  with open(output_path, 'w') as out_f:
    with open(vocab_path) as vocab_f:
      for index, word in enumerate(vocab_f):
        word = word.strip()
        embedding = embeddings[index]
        out_f.write(word + '\t' + '\t'.join([str(x) for x in embedding])
                    + '\n')


def write_embeddings_to_disk(config, model, sess):
  """Writes row and column embeddings to disk as two separate tsv files."""
  # Row Embedding
  row_vocab_path = config.input_base_path + '/row_vocab.txt'
  row_embedding_output_path = config.output_base_path + '/row_embedding.tsv'
  print 'Writing row embeddings to:', row_embedding_output_path
  sys.stdout.flush()
  write_embedding_tensor_to_disk(row_vocab_path, row_embedding_output_path,
                                 sess, model.row_embedding)

  # Column Embedding
  col_vocab_path = config.input_base_path + '/col_vocab.txt'
  col_embedding_output_path = config.output_base_path + '/col_embedding.tsv'
  print 'Writing column embeddings to:', col_embedding_output_path
  sys.stdout.flush()
  write_embedding_tensor_to_disk(col_vocab_path, col_embedding_output_path,
                                 sess, model.col_embedding)


class SwivelModel(object):
  """Small class to gather needed pieces from a Graph being built."""

  def __init__(self, config):
    """Construct graph for dmc.

    Reads the marginals and shard files under config.input_base_path and
    builds the full training graph: embeddings, biases, input pipeline, the
    combined L2 + sigmoid loss, summaries, and the Adagrad train op.
    """
    self._config = config

    # Create paths to input data files
    print 'Reading model from:', config.input_base_path
    sys.stdout.flush()
    count_matrix_files = glob.glob(config.input_base_path + '/shard-*.pb')
    row_sums_path = config.input_base_path + '/row_sums.txt'
    col_sums_path = config.input_base_path + '/col_sums.txt'

    # Read marginals
    row_sums = read_marginals_file(row_sums_path)
    col_sums = read_marginals_file(col_sums_path)

    self.n_rows = len(row_sums)
    self.n_cols = len(col_sums)
    print 'Matrix dim: (%d,%d) SubMatrix dim: (%d,%d) ' % (
        self.n_rows, self.n_cols, config.submatrix_rows,
        config.submatrix_cols)
    sys.stdout.flush()
    # NOTE(review): Python 2 integer division here; assumes the matrix dims
    # are exact multiples of the submatrix dims -- confirm with data layout.
    self.n_submatrices = (self.n_rows * self.n_cols /
                          (config.submatrix_rows * config.submatrix_cols))
    print 'n_submatrices: %d' % (self.n_submatrices)
    sys.stdout.flush()

    # ===== CREATE VARIABLES ======

    with tf.device('/cpu:0'):
      # embeddings
      self.row_embedding = embeddings_with_init(
          embedding_dim=config.embedding_size,
          vocab_size=self.n_rows,
          name='row_embedding')
      self.col_embedding = embeddings_with_init(
          embedding_dim=config.embedding_size,
          vocab_size=self.n_cols,
          name='col_embedding')
      tf.histogram_summary('row_emb', self.row_embedding)
      tf.histogram_summary('col_emb', self.col_embedding)

      # Biases start at log(marginal + 1); +1 guards against log(0).
      matrix_log_sum = math.log(np.sum(row_sums) + 1)
      row_bias_init = [math.log(x + 1) for x in row_sums]
      col_bias_init = [math.log(x + 1) for x in col_sums]
      self.row_bias = tf.Variable(row_bias_init,
                                  trainable=config.trainable_bias)
      self.col_bias = tf.Variable(col_bias_init,
                                  trainable=config.trainable_bias)
      tf.histogram_summary('row_bias', self.row_bias)
      tf.histogram_summary('col_bias', self.col_bias)

    # ===== CREATE GRAPH =====

    # Get input
    with tf.device('/cpu:0'):
      global_row, global_col, count = count_matrix_input(
          count_matrix_files, config.submatrix_rows, config.submatrix_cols)

      # Fetch embeddings.
      selected_row_embedding = tf.nn.embedding_lookup(self.row_embedding,
                                                      global_row)
      selected_col_embedding = tf.nn.embedding_lookup(self.col_embedding,
                                                      global_col)

      # Fetch biases.
      selected_row_bias = tf.nn.embedding_lookup([self.row_bias], global_row)
      selected_col_bias = tf.nn.embedding_lookup([self.col_bias], global_col)

    # Multiply the row and column embeddings to generate predictions.
    predictions = tf.matmul(
        selected_row_embedding, selected_col_embedding, transpose_b=True)

    # These binary masks separate zero from non-zero values.
    count_is_nonzero = tf.to_float(tf.cast(count, tf.bool))
    count_is_zero = 1 - tf.to_float(tf.cast(count, tf.bool))

    # Target PMI-style objective; 1e-30 keeps log() finite at zero counts.
    objectives = count_is_nonzero * tf.log(count + 1e-30)
    objectives -= tf.reshape(selected_row_bias, [config.submatrix_rows, 1])
    objectives -= selected_col_bias
    objectives += matrix_log_sum

    err = predictions - objectives

    # The confidence function scales the L2 loss based on the raw
    # co-occurrence count.
    l2_confidence = (config.confidence_base +
                     config.confidence_scale * tf.pow(
                         count, config.confidence_exponent))

    # L2 loss on observed cells; soft-hinge (softplus) loss on unobserved.
    l2_loss = config.loss_multiplier * tf.reduce_sum(
        0.5 * l2_confidence * err * err * count_is_nonzero)

    sigmoid_loss = config.loss_multiplier * tf.reduce_sum(
        tf.nn.softplus(err) * count_is_zero)

    self.loss = l2_loss + sigmoid_loss

    tf.scalar_summary("l2_loss", l2_loss)
    tf.scalar_summary("sigmoid_loss", sigmoid_loss)
    tf.scalar_summary("loss", self.loss)

    # Add optimizer.
    self.global_step = tf.Variable(0, name='global_step')
    opt = tf.train.AdagradOptimizer(config.learning_rate)
    self.train_op = opt.minimize(self.loss, global_step=self.global_step)
    self.saver = tf.train.Saver(sharded=True)


def main(_):
  """Builds the model, trains it with concurrent threads, writes embeddings."""
  # Create the output path.  If this fails, it really ought to fail
  # now. :)
  if not os.path.isdir(FLAGS.output_base_path):
    os.makedirs(FLAGS.output_base_path)

  # Create and run model
  with tf.Graph().as_default():
    model = SwivelModel(FLAGS)

    # Create a session for running Ops on the Graph.
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=FLAGS.per_process_gpu_memory_fraction)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

    # Run the Op to initialize the variables.
    sess.run(tf.initialize_all_variables())

    # Start feeding input
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Calculate how many steps each thread should run
    n_total_steps = int(FLAGS.num_epochs * model.n_rows * model.n_cols) / (
        FLAGS.submatrix_rows * FLAGS.submatrix_cols)
    n_steps_per_thread = n_total_steps / FLAGS.num_concurrent_steps
    n_submatrices_to_train = model.n_submatrices * FLAGS.num_epochs
    # Wrapped in a list so the closure below can mutate it (py2 has no
    # `nonlocal`).
    t0 = [time.time()]

    def TrainingFn():
      # Runs the train op in a loop, logging throughput every 100 steps.
      for _ in range(n_steps_per_thread):
        _, global_step = sess.run([model.train_op, model.global_step])
        n_steps_between_status_updates = 100
        if (global_step % n_steps_between_status_updates) == 0:
          elapsed = float(time.time() - t0[0])
          print '%d/%d submatrices trained (%.1f%%), %.1f submatrices/sec' % (
              global_step, n_submatrices_to_train,
              100.0 * global_step / n_submatrices_to_train,
              n_steps_between_status_updates / elapsed)
          sys.stdout.flush()
          t0[0] = time.time()

    # Start training threads
    train_threads = []
    for _ in range(FLAGS.num_concurrent_steps):
      t = threading.Thread(target=TrainingFn)
      train_threads.append(t)
      t.start()

    # Wait for threads to finish.
    for t in train_threads:
      t.join()

    coord.request_stop()
    coord.join(threads)

    # Write out vectors
    write_embeddings_to_disk(FLAGS, model, sess)

    #Shutdown
    sess.close()


if __name__ == '__main__':
  tf.app.run()
#!/usr/bin/env python
"""Tests for the FileFinder flow."""


import os

from grr.client import vfs
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import test_lib

# pylint:mode=test


class FileFinderActionMock(test_lib.ActionMock):
  """Action mock that pins the stat timestamps of the known test files.

  Rewrites st_atime/st_mtime/st_ctime of auth.log, dpkg.log and
  dpkg_false.log to fixed values so the time-based filter tests are
  deterministic.
  """

  def __init__(self):
    super(FileFinderActionMock, self).__init__(
        "Find", "TransferBuffer", "HashBuffer", "HashFile", "FingerprintFile",
        "Grep", "StatFile")

  def HandleMessage(self, message):
    """Delegates to the real mock, then patches FindSpec hit timestamps."""
    responses = super(FileFinderActionMock, self).HandleMessage(message)

    # (st_atime, st_mtime, st_ctime) per known basename.
    predefined_values = {
        "auth.log": (1333333330, 1333333332, 1333333334),
        "dpkg.log": (1444444440, 1444444442, 1444444444),
        "dpkg_false.log": (1555555550, 1555555552, 1555555554)
    }

    processed_responses = []

    for response in responses:
      payload = response.payload
      if isinstance(payload, rdfvalue.FindSpec):
        basename = payload.hit.pathspec.Basename()
        try:
          payload.hit.st_atime = predefined_values[basename][0]
          payload.hit.st_mtime = predefined_values[basename][1]
          payload.hit.st_ctime = predefined_values[basename][2]
          response.payload = payload
        except KeyError:
          # Unknown file: pass the response through unmodified.
          pass
      processed_responses.append(response)

    return processed_responses


class TestFileFinderFlow(test_lib.FlowTestsBaseclass):
  """Test the FetchFiles flow."""

  def FileNameToURN(self, fname):
    """Maps a test-data basename to its client AFF4 URN under /fs/os."""
    return rdfvalue.RDFURN(self.client_id).Add("/fs/os").Add(
        os.path.join(os.path.dirname(self.base_path), "test_data", fname))

  def CheckFilesHashed(self, fnames):
    """Checks the returned hashes."""
    # Expected (sha1, md5, sha256) hex digests per test file.
    hashes = {
        "auth.log": ("67b8fc07bd4b6efc3b2dce322e8ddf609b540805",
                     "264eb6ff97fc6c37c5dd4b150cb0a797",
                     "91c8d6287a095a6fa6437dac50ffe3fe5c5e0d06dff"
                     "3ae830eedfce515ad6451"),
        "dpkg.log": ("531b1cfdd337aa1663f7361b2fd1c8fe43137f4a",
                     "26973f265ce5ecc1f86bc413e65bfc1d",
                     "48303a1e7ceec679f6d417b819f42779575ffe8eabf"
                     "9c880d286a1ee074d8145"),
        "dpkg_false.log": ("a2c9cc03c613a44774ae97ed6d181fe77c13e01b",
                           "ab48f3548f311c77e75ac69ac4e696df",
                           "a35aface4b45e3f1a95b0df24efc50e14fbedcaa6a7"
                           "50ba32358eaaffe3c4fb0")
    }

    for fname in fnames:
      try:
        file_hashes = hashes[fname]
      except KeyError:
        raise RuntimeError("Can't check unexpected result for correct "
                           "hashes: %s" % fname)

      fd = aff4.FACTORY.Open(self.FileNameToURN(fname), token=self.token)
      fingerprint = fd.Get(fd.Schema.FINGERPRINT)
      self.assertEqual(fingerprint.GetFingerprint(
          "generic")["sha1"].encode("hex"), file_hashes[0])
      self.assertEqual(fingerprint.GetFingerprint(
          "generic")["md5"].encode("hex"), file_hashes[1])
      self.assertEqual(fingerprint.GetFingerprint(
          "generic")["sha256"].encode("hex"), file_hashes[2])

  def CheckFilesNotHashed(self, fnames):
    """Asserts no fingerprint was stored for any of the files."""
    for fname in fnames:
      fd = aff4.FACTORY.Open(self.FileNameToURN(fname), token=self.token)
      self.assertTrue(fd.Get(fd.Schema.FINGERPRINT) is None)

  def CheckFilesDownloaded(self, fnames):
    """Asserts the files' contents were transferred (size > 100)."""
    for fname in fnames:
      fd = aff4.FACTORY.Open(self.FileNameToURN(fname), token=self.token)
      self.assertTrue(fd.Get(fd.Schema.SIZE) > 100)

  def CheckFilesNotDownloaded(self, fnames):
    """Asserts the files' contents were not transferred (size == 0)."""
    for fname in fnames:
      fd = aff4.FACTORY.Open(self.FileNameToURN(fname), token=self.token)
      self.assertEqual(fd.Get(fd.Schema.SIZE), 0)

  def CheckFilesInCollection(self, fnames):
    """Asserts the output collection contains exactly the given basenames."""
    if fnames:
      # If results are expected, check that they are present in the
      # collection. Also check that there are no other files.
      output = aff4.FACTORY.Open(self.client_id.Add(self.output_path),
                                 aff4_type="RDFValueCollection",
                                 token=self.token)
      self.assertEqual(len(output), len(fnames))

      sorted_output = sorted(output,
                             key=lambda x: x.stat_entry.aff4path.Basename())
      for fname, result in zip(sorted(fnames), sorted_output):
        self.assertTrue(isinstance(result, rdfvalue.FileFinderResult))
        self.assertEqual(result.stat_entry.aff4path.Basename(), fname)
    else:
      # If no results are expected, collection shouldn't be created.
      self.assertRaises(aff4.InstantiationError, aff4.FACTORY.Open,
                        self.client_id.Add(self.output_path),
                        aff4_type="RDFValueCollection", token=self.token)

  def RunFlowAndCheckResults(
      self, filters=None, action=rdfvalue.FileFinderAction.Action.DO_NOTHING,
      expected_files=None, non_expected_files=None):
    """Runs FileFinder with the given filters/action and verifies side effects.

    Deletes any pre-existing AFF4 objects for the files first, then checks
    the output collection plus the download/hash state appropriate to the
    chosen action.
    """
    filters = filters or []
    expected_files = expected_files or []
    non_expected_files = non_expected_files or []

    for fname in expected_files + non_expected_files:
      aff4.FACTORY.Delete(self.FileNameToURN(fname), token=self.token)

    for _ in test_lib.TestFlowHelper(
        "FileFinder", self.client_mock, client_id=self.client_id,
        paths=[self.path], pathtype=rdfvalue.PathSpec.PathType.OS,
        action=rdfvalue.FileFinderAction(action_type=action),
        filters=filters, token=self.token, output=self.output_path):
      pass

    self.CheckFilesInCollection(expected_files)

    if action == rdfvalue.FileFinderAction.Action.DO_NOTHING:
      self.CheckFilesNotDownloaded(expected_files + non_expected_files)
      self.CheckFilesNotHashed(expected_files + non_expected_files)
    elif action == rdfvalue.FileFinderAction.Action.DOWNLOAD:
      self.CheckFilesDownloaded(expected_files)
      self.CheckFilesNotDownloaded(non_expected_files)
      self.CheckFilesNotHashed(expected_files + non_expected_files)
    elif action == rdfvalue.FileFinderAction.Action.HASH:
      self.CheckFilesNotDownloaded(expected_files + non_expected_files)
      self.CheckFilesHashed(expected_files)
      self.CheckFilesNotHashed(non_expected_files)

  def setUp(self):
    super(TestFileFinderFlow, self).setUp()
    self.output_path = "analysis/file_finder"
    self.client_mock = FileFinderActionMock()
    self.pattern = "test_data/*.log"
    self.path = os.path.join(os.path.dirname(self.base_path), self.pattern)

  def testFileFinderNoActionWithoutFilters(self):
    self.RunFlowAndCheckResults(
        action=rdfvalue.FileFinderAction.Action.DO_NOTHING,
        expected_files=["auth.log", "dpkg.log", "dpkg_false.log"])

  def testFileFinderDownloadActionWithoutFilters(self):
    self.RunFlowAndCheckResults(
        action=rdfvalue.FileFinderAction.Action.DOWNLOAD,
        expected_files=["auth.log", "dpkg.log", "dpkg_false.log"])

  def testFileFinderHashActionWithoutFilters(self):
    self.RunFlowAndCheckResults(
        action=rdfvalue.FileFinderAction.Action.HASH,
        expected_files=["auth.log", "dpkg.log", "dpkg_false.log"])

  def testLiteralMatchFilterWithDifferentActions(self):
    expected_files = ["auth.log"]
    non_expected_files = ["dpkg.log", "dpkg_false.log"]

    literal_filter = rdfvalue.FileFinderFilter(
        filter_type=rdfvalue.FileFinderFilter.Type.CONTENTS_LITERAL_MATCH,
        contents_literal_match=
        rdfvalue.FileFinderContentsLiteralMatchFilter(
            mode=rdfvalue.FileFinderContentsLiteralMatchFilter.Mode.ALL_HITS,
            literal="session opened for user dearjohn"))

    for action in sorted(rdfvalue.FileFinderAction.Action.enum_dict.values()):
      self.RunFlowAndCheckResults(
          action=action, filters=[literal_filter],
          expected_files=expected_files,
          non_expected_files=non_expected_files)

      # Check that the results' matches fields are correctly filled.
      fd = aff4.FACTORY.Open(self.client_id.Add(self.output_path),
                             aff4_type="RDFValueCollection",
                             token=self.token)
      self.assertEqual(len(fd), 1)
      self.assertEqual(len(fd[0].matches), 1)
      self.assertEqual(fd[0].matches[0].offset, 350)
      self.assertEqual(fd[0].matches[0].data,
                       "session): session opened for user dearjohn by (uid=0")

  def testRegexMatchFilterWithDifferentActions(self):
    expected_files = ["auth.log"]
    non_expected_files = ["dpkg.log", "dpkg_false.log"]

    regex_filter = rdfvalue.FileFinderFilter(
        filter_type=rdfvalue.FileFinderFilter.Type.CONTENTS_REGEX_MATCH,
        contents_regex_match=rdfvalue.FileFinderContentsRegexMatchFilter(
            mode=rdfvalue.FileFinderContentsRegexMatchFilter.Mode.ALL_HITS,
            regex="session opened for user .*?john"))

    for action in sorted(rdfvalue.FileFinderAction.Action.enum_dict.values()):
      self.RunFlowAndCheckResults(
          action=action, filters=[regex_filter],
          expected_files=expected_files,
          non_expected_files=non_expected_files)

      fd = aff4.FACTORY.Open(self.client_id.Add(self.output_path),
                             aff4_type="RDFValueCollection",
                             token=self.token)
      self.assertEqual(len(fd), 1)
      self.assertEqual(len(fd[0].matches), 1)
      self.assertEqual(fd[0].matches[0].offset, 350)
      self.assertEqual(fd[0].matches[0].data,
                       "session): session opened for user dearjohn by (uid=0")

  def testTwoRegexMatchFiltersWithDifferentActions1(self):
    expected_files = ["auth.log"]
    non_expected_files = ["dpkg.log", "dpkg_false.log"]

    regex_filter1 = rdfvalue.FileFinderFilter(
        filter_type=rdfvalue.FileFinderFilter.Type.CONTENTS_REGEX_MATCH,
        contents_regex_match=rdfvalue.FileFinderContentsRegexMatchFilter(
            mode=rdfvalue.FileFinderContentsRegexMatchFilter.Mode.ALL_HITS,
            regex="session opened for user .*?john"))
    regex_filter2 = rdfvalue.FileFinderFilter(
        filter_type=rdfvalue.FileFinderFilter.Type.CONTENTS_REGEX_MATCH,
        contents_regex_match=rdfvalue.FileFinderContentsRegexMatchFilter(
            mode=rdfvalue.FileFinderContentsRegexMatchFilter.Mode.ALL_HITS,
            regex="format.*should"))

    for action in sorted(rdfvalue.FileFinderAction.Action.enum_dict.values()):
      self.RunFlowAndCheckResults(
          action=action, filters=[regex_filter1, regex_filter2],
          expected_files=expected_files,
          non_expected_files=non_expected_files)

      # Check the output file is created
      fd = aff4.FACTORY.Open(self.client_id.Add(self.output_path),
                             aff4_type="RDFValueCollection",
                             token=self.token)
      self.assertEqual(len(fd), 1)
      self.assertEqual(len(fd[0].matches), 2)
      self.assertEqual(fd[0].matches[0].offset, 350)
      self.assertEqual(fd[0].matches[0].data,
                       "session): session opened for user dearjohn by (uid=0")
      self.assertEqual(fd[0].matches[1].offset, 513)
      self.assertEqual(fd[0].matches[1].data,
                       "rong line format.... should not be he")

  def testTwoRegexMatchFiltersWithDifferentActions2(self):
    expected_files = ["auth.log"]
    non_expected_files = ["dpkg.log", "dpkg_false.log"]

    regex_filter1 = rdfvalue.FileFinderFilter(
        filter_type=rdfvalue.FileFinderFilter.Type.CONTENTS_REGEX_MATCH,
        contents_regex_match=
        rdfvalue.FileFinderContentsRegexMatchFilter(
            mode=rdfvalue.FileFinderContentsRegexMatchFilter.Mode.ALL_HITS,
            regex="session opened for user .*?john"))
    regex_filter2 = rdfvalue.FileFinderFilter(
        filter_type=rdfvalue.FileFinderFilter.Type.CONTENTS_REGEX_MATCH,
        contents_regex_match=
        rdfvalue.FileFinderContentsRegexMatchFilter(
            mode=rdfvalue.FileFinderContentsRegexMatchFilter.Mode.FIRST_HIT,
            regex=".*"))

    for action in sorted(rdfvalue.FileFinderAction.Action.enum_dict.values()):
      self.RunFlowAndCheckResults(
          action=action, filters=[regex_filter1, regex_filter2],
          expected_files=expected_files,
          non_expected_files=non_expected_files)

      # Check the output file is created
      fd = aff4.FACTORY.Open(self.client_id.Add(self.output_path),
                             aff4_type="RDFValueCollection",
                             token=self.token)
      self.assertEqual(len(fd), 1)
      self.assertEqual(len(fd[0].matches), 2)
      self.assertEqual(fd[0].matches[0].offset, 350)
      self.assertEqual(fd[0].matches[0].data,
                       "session): session opened for user dearjohn by (uid=0")
      self.assertEqual(fd[0].matches[1].offset, 0)
      self.assertEqual(fd[0].matches[1].length, 770)

  def testSizeFilterWithDifferentActions(self):
    expected_files = ["dpkg.log", "dpkg_false.log"]
    non_expected_files = ["auth.log"]

    size_filter = rdfvalue.FileFinderFilter(
        filter_type=rdfvalue.FileFinderFilter.Type.SIZE,
        size=rdfvalue.FileFinderSizeFilter(max_file_size=626))

    for action in sorted(rdfvalue.FileFinderAction.Action.enum_dict.values()):
      self.RunFlowAndCheckResults(
          action=action, filters=[size_filter],
          expected_files=expected_files,
          non_expected_files=non_expected_files)

  def testSizeAndRegexFiltersWithDifferentActions(self):
    expected_files = []
    non_expected_files = ["dpkg.log", "dpkg_false.log", "auth.log"]

    size_filter = rdfvalue.FileFinderFilter(
        filter_type=rdfvalue.FileFinderFilter.Type.SIZE,
        size=rdfvalue.FileFinderSizeFilter(
            max_file_size=626))
    regex_filter = rdfvalue.FileFinderFilter(
        filter_type=rdfvalue.FileFinderFilter.Type.CONTENTS_REGEX_MATCH,
        contents_regex_match=rdfvalue.FileFinderContentsRegexMatchFilter(
            mode=rdfvalue.FileFinderContentsRegexMatchFilter.Mode.ALL_HITS,
            regex="session opened for user .*?john"))

    for action in sorted(rdfvalue.FileFinderAction.Action.enum_dict.values()):
      self.RunFlowAndCheckResults(
          action=action, filters=[size_filter, regex_filter],
          expected_files=expected_files,
          non_expected_files=non_expected_files)

    # Check that order of filters doesn't influence results
    for action in sorted(rdfvalue.FileFinderAction.Action.enum_dict.values()):
      self.RunFlowAndCheckResults(
          action=action, filters=[regex_filter, size_filter],
          expected_files=expected_files,
          non_expected_files=non_expected_files)

  def testModificationTimeFilterWithDifferentActions(self):
    expected_files = ["dpkg.log", "dpkg_false.log"]
    non_expected_files = ["auth.log"]

    modification_time_filter = rdfvalue.FileFinderFilter(
        filter_type=rdfvalue.FileFinderFilter.Type.MODIFICATION_TIME,
        modification_time=rdfvalue.FileFinderModificationTimeFilter(
            min_last_modified_time=
            rdfvalue.RDFDatetime().FromSecondsFromEpoch(1444444440)))

    for action in sorted(rdfvalue.FileFinderAction.Action.enum_dict.values()):
      self.RunFlowAndCheckResults(
          action=action, filters=[modification_time_filter],
          expected_files=expected_files,
          non_expected_files=non_expected_files)

  def testAccessTimeFilterWithDifferentActions(self):
    expected_files = ["dpkg.log", "dpkg_false.log"]
    non_expected_files = ["auth.log"]

    access_time_filter = rdfvalue.FileFinderFilter(
        filter_type=rdfvalue.FileFinderFilter.Type.ACCESS_TIME,
        access_time=rdfvalue.FileFinderAccessTimeFilter(
            min_last_access_time=
            rdfvalue.RDFDatetime().FromSecondsFromEpoch(1444444440)))

    for action in sorted(rdfvalue.FileFinderAction.Action.enum_dict.values()):
      self.RunFlowAndCheckResults(
          action=action, filters=[access_time_filter],
          expected_files=expected_files,
          non_expected_files=non_expected_files)

  def testInodeChangeTimeFilterWithDifferentActions(self):
    expected_files = ["dpkg.log", "dpkg_false.log"]
    non_expected_files = ["auth.log"]

    inode_change_time_filter = rdfvalue.FileFinderFilter(
        filter_type=rdfvalue.FileFinderFilter.Type.INODE_CHANGE_TIME,
        inode_change_time=
        rdfvalue.FileFinderInodeChangeTimeFilter(
            min_last_inode_change_time=
            rdfvalue.RDFDatetime().FromSecondsFromEpoch(1444444440)))

    for action in sorted(rdfvalue.FileFinderAction.Action.enum_dict.values()):
      self.RunFlowAndCheckResults(
          action=action, filters=[inode_change_time_filter],
          expected_files=expected_files,
          non_expected_files=non_expected_files)

  def testTreatsGlobsAsPathsWhenMemoryPathTypeIsUsed(self):
    # No need to setup VFS handlers as we're not actually looking at the
    # files, as there's no filter/action specified.
    paths = [os.path.join(os.path.dirname(self.base_path), "*.log"),
             os.path.join(os.path.dirname(self.base_path), "auth.log")]

    for _ in test_lib.TestFlowHelper(
        "FileFinder", self.client_mock, client_id=self.client_id,
        paths=paths, pathtype=rdfvalue.PathSpec.PathType.MEMORY,
        token=self.token, output=self.output_path):
      pass

    # Both auth.log and *.log should be present, because we don't apply
    # any filters and by default FileFinder treats given paths as paths
    # to memory devices when using PathType=MEMORY. So checking
    # files existence doesn't make much sense.
    self.CheckFilesInCollection(["*.log", "auth.log"])

  def testAppliesLiteralFilterWhenMemoryPathTypeIsUsed(self):
    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.OS] = test_lib.ClientTestDataVFSFixture
    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.MEMORY] = test_lib.ClientTestDataVFSFixture

    paths = [os.path.join(os.path.dirname(self.base_path), "auth.log"),
             os.path.join(os.path.dirname(self.base_path), "dpkg.log")]

    literal_filter = rdfvalue.FileFinderFilter(
        filter_type=rdfvalue.FileFinderFilter.Type.CONTENTS_LITERAL_MATCH,
        contents_literal_match=
        rdfvalue.FileFinderContentsLiteralMatchFilter(
            mode=rdfvalue.FileFinderContentsLiteralMatchFilter.Mode.ALL_HITS,
            literal="session opened for user dearjohn"))

    # Check this filter with all the actions. This makes sense, as we may
    # download memory or send it to the socket.
    for action in sorted(rdfvalue.FileFinderAction.Action.enum_dict.values()):
      for _ in test_lib.TestFlowHelper(
          "FileFinder", self.client_mock, client_id=self.client_id,
          paths=paths, pathtype=rdfvalue.PathSpec.PathType.MEMORY,
          filters=[literal_filter],
          action=rdfvalue.FileFinderAction(action_type=action),
          token=self.token, output=self.output_path):
        pass

      self.CheckFilesInCollection(["auth.log"])

      fd = aff4.FACTORY.Open(self.client_id.Add(self.output_path),
                             aff4_type="RDFValueCollection",
                             token=self.token)
      self.assertEqual(fd[0].stat_entry.pathspec.CollapsePath(), paths[0])
      self.assertEqual(len(fd), 1)
      self.assertEqual(len(fd[0].matches), 1)
      self.assertEqual(fd[0].matches[0].offset, 350)
      self.assertEqual(fd[0].matches[0].data,
                       "session): session opened for user dearjohn by (uid=0")
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.

from __future__ import absolute_import, unicode_literals

import ast
import codecs
import os
import re
import socket
import warnings
from datetime import timedelta

import pytz
from celery.schedules import crontab
from flask import current_app, g
from flask.helpers import get_root_path
from werkzeug.datastructures import ImmutableDict
from werkzeug.urls import url_parse

from indico.util.caching import make_hashable
from indico.util.fs import resolve_link
from indico.util.packaging import package_is_editable
from indico.util.string import crc32, snakify


# Default value for every config option that may appear in the config file;
# keys not listed here are rejected by _sanitize_data with a warning.
DEFAULTS = {
    'ATTACHMENT_STORAGE': 'default',
    'AUTH_PROVIDERS': {},
    'BASE_URL': None,
    'CACHE_BACKEND': 'files',
    'CACHE_DIR': '/opt/indico/cache',
    'CATEGORY_CLEANUP': {},
    'CELERY_BROKER': None,
    'CELERY_CONFIG': {},
    'CELERY_RESULT_BACKEND': None,
    'COMMUNITY_HUB_URL': 'https://hub.getindico.io',
    'CUSTOMIZATION_DEBUG': False,
    'CUSTOMIZATION_DIR': None,
    'CUSTOM_COUNTRIES': {},
    'DB_LOG': False,
    'DEBUG': False,
    'DEFAULT_LOCALE': 'en_GB',
    'DEFAULT_TIMEZONE': 'UTC',
    'DISABLE_CELERY_CHECK': None,
    'ENABLE_ROOMBOOKING': False,
    'EXTERNAL_REGISTRATION_URL': None,
    'FLOWER_URL': None,
    'HELP_URL': 'https://learn.getindico.io',
    'IDENTITY_PROVIDERS': {},
    'LOCAL_IDENTITIES': True,
    'LOCAL_MODERATION': False,
    'LOCAL_REGISTRATION': True,
    'LOCAL_GROUPS': True,
    'LOGGING_CONFIG_FILE': 'logging.yaml',
    'LOGO_URL': None,
    'LOG_DIR': '/opt/indico/log',
    'MAX_UPLOAD_FILES_TOTAL_SIZE': 0,
    'MAX_UPLOAD_FILE_SIZE': 0,
    'MEMCACHED_SERVERS': [],
    'NO_REPLY_EMAIL': None,
    'PLUGINS': set(),
    'PROFILE': False,
    'PROVIDER_MAP': {},
    'PUBLIC_SUPPORT_EMAIL': None,
    'REDIS_CACHE_URL': None,
    'ROUTE_OLD_URLS': False,
    'SCHEDULED_TASK_OVERRIDE': {},
    'SECRET_KEY': None,
    'SENTRY_DSN': None,
    'SENTRY_LOGGING_LEVEL': 'WARNING',
    'SESSION_LIFETIME': 86400 * 31,
    'SMTP_LOGIN': None,
    'SMTP_PASSWORD': None,
    'SMTP_SERVER': ('localhost', 25),
    'SMTP_TIMEOUT': 30,
    'SMTP_USE_CELERY': True,
    'SMTP_USE_TLS': False,
    'SQLALCHEMY_DATABASE_URI': None,
    'SQLALCHEMY_MAX_OVERFLOW': 3,
    'SQLALCHEMY_POOL_RECYCLE': 120,
    'SQLALCHEMY_POOL_SIZE': 5,
    'SQLALCHEMY_POOL_TIMEOUT': 10,
    'STATIC_FILE_METHOD': None,
    'STATIC_SITE_STORAGE': None,
    'STORAGE_BACKENDS': {'default': 'fs:/opt/indico/archive'},
    'STRICT_LATEX': False,
    'SUPPORT_EMAIL': None,
    'TEMP_DIR': '/opt/indico/tmp',
    'USE_PROXY': False,
    'WORKER_NAME': socket.getfqdn(),
    'XELATEX_PATH': None,
}

# Default values for settings that cannot be set in the config file
INTERNAL_DEFAULTS = {
    'CONFIG_PATH': os.devnull,
    'CONFIG_PATH_RESOLVED': None,
    'LOGGING_CONFIG_PATH': None,
    'TESTING': False
}


def get_config_path():
    """Get the path of the indico config file.

    This may return the location of a symlink.  Resolving a link is up to
    the caller if needed.
    """
    # In certain environments (debian+uwsgi+no-systemd) Indico may run
    # with an incorrect $HOME (such as /root), resulting in the config
    # files being searched in the wrong place. By clearing $HOME, Python
    # will get the home dir from passwd which has the correct path.
    old_home = os.environ.pop('HOME', None)
    # env var has priority
    try:
        return os.path.expanduser(os.environ['INDICO_CONFIG'])
    except KeyError:
        pass
    # try finding the config in various common paths
    paths = [os.path.expanduser('~/.indico.conf'), '/etc/indico.conf']
    # Keeping HOME unset wouldn't be too bad but let's not have weird
    # side-effects
    if old_home is not None:
        os.environ['HOME'] = old_home
    # If it's an editable setup (ie usually a dev instance) allow having
    # the config in the package's root path
    if package_is_editable('indico'):
        paths.insert(0, os.path.normpath(os.path.join(get_root_path('indico'), 'indico.conf')))
    for path in paths:
        if os.path.exists(path):
            return path
    raise Exception('No indico config found. Point the INDICO_CONFIG env var to your config file or '
                    'move/symlink the config in one of the following locations: {}'.format(', '.join(paths)))


def _parse_config(path):
    """Execute the config file and return its upper-cased settings as a dict."""
    globals_ = {'timedelta': timedelta, 'crontab': crontab}
    locals_ = {}
    with codecs.open(path, encoding='utf-8') as config_file:
        # XXX: unicode_literals is inherited from this file
        exec compile(config_file.read(), path, 'exec') in globals_, locals_
    # Names starting with '_' are treated as private helpers in the config
    # file; legacy camelCase names are converted to UPPER_SNAKE first.
    return {unicode(k if k.isupper() else _convert_key(k)): v
            for k, v in locals_.iteritems()
            if k[0] != '_'}


def _convert_key(name):
    # camelCase to BIG_SNAKE while preserving acronyms, i.e.
    # FooBARs -> FOO_BARS (and not FOO_BA_RS)
    name = re.sub(r'([A-Z])([A-Z]+)', lambda m: m.group(1) + m.group(2).lower(), name)
    name = snakify(name).upper()
    # Legacy option names that were renamed.
    special_cases = {'PDFLATEX_PROGRAM': 'XELATEX_PATH',
                     'IS_ROOM_BOOKING_ACTIVE': 'ENABLE_ROOMBOOKING'}
    return special_cases.get(name, name)


def _postprocess_config(data):
    """Validate/derive settings that depend on other settings."""
    if data['DEFAULT_TIMEZONE'] not in pytz.all_timezones_set:
        raise ValueError('Invalid default timezone: {}'.format(data['DEFAULT_TIMEZONE']))
    data['BASE_URL'] = data['BASE_URL'].rstrip('/')
    data['STATIC_SITE_STORAGE'] = data['STATIC_SITE_STORAGE'] or data['ATTACHMENT_STORAGE']
    if data['DISABLE_CELERY_CHECK'] is None:
        # The celery check defaults to disabled in debug setups.
        data['DISABLE_CELERY_CHECK'] = data['DEBUG']


def _sanitize_data(data, allow_internal=False):
    """Drop (with a warning) any keys that are not known config options."""
    allowed = set(DEFAULTS)
    if allow_internal:
        allowed |= set(INTERNAL_DEFAULTS)
    for key in set(data) - allowed:
        warnings.warn('Ignoring unknown config key {}'.format(key))
    return {k: v for k, v in data.iteritems() if k in allowed}


def load_config(only_defaults=False, override=None):
    """Load the configuration data.

    :param only_defaults: Whether to load only the default options,
                          ignoring any user-specified config file or
                          environment-based overrides.
    :param override: An optional dict with extra values to add to
                     the configuration.  Any values provided here
                     will override values from the config file.
    """
    data = dict(DEFAULTS, **INTERNAL_DEFAULTS)
    if not only_defaults:
        path = get_config_path()
        config = _sanitize_data(_parse_config(path))
        data.update(config)
        # INDICO_CONF_OVERRIDE may contain a literal dict with extra settings.
        env_override = os.environ.get('INDICO_CONF_OVERRIDE')
        if env_override:
            data.update(_sanitize_data(ast.literal_eval(env_override)))
        resolved_path = resolve_link(path) if os.path.islink(path) else path
        resolved_path = None if resolved_path == os.devnull else resolved_path
        data['CONFIG_PATH'] = path
        data['CONFIG_PATH_RESOLVED'] = resolved_path
        if resolved_path is not None:
            # The logging config is looked up next to the (resolved) config.
            data['LOGGING_CONFIG_PATH'] = os.path.join(os.path.dirname(resolved_path),
                                                       data['LOGGING_CONFIG_FILE'])

    if override:
        data.update(_sanitize_data(override, allow_internal=True))

    _postprocess_config(data)
    return ImmutableDict(data)


class IndicoConfig(object):
    """Wrapper for the Indico configuration.

    It exposes all config keys as read-only attributes.

    Dynamic configuration attributes whose value may change depending
    on other factors may be added as properties, but this should be
    kept to a minimum and is mostly there for legacy reasons.

    :param config: The dict containing the configuration data.
                   If omitted, it is taken from the active flask
                   application.  An explicit configuration dict should
                   not be specified except in special cases such as
                   the initial app configuration where no app context
                   is available yet.
    :param exc: The exception to raise when accessing an invalid
                config key.  This allows using the expected kind of
                exception in most cases but overriding it when
                exposing settings to Jinja where the default
                :exc:`AttributeError` would silently be turned into
                an empty string.
""" __slots__ = ('_config', '_exc') def __init__(self, config=None, exc=AttributeError): # yuck, but we don't allow writing to attributes directly object.__setattr__(self, '_config', config) object.__setattr__(self, '_exc', exc) @property def data(self): try: return self._config or current_app.config['INDICO'] except KeyError: raise RuntimeError('config not loaded') @property def hash(self): return crc32(repr(make_hashable(sorted(self.data.items())))) @property def CONFERENCE_CSS_TEMPLATES_BASE_URL(self): return self.BASE_URL + '/css/confTemplates' @property def IMAGES_BASE_URL(self): return 'static/images' if g.get('static_site') else url_parse('{}/images'.format(self.BASE_URL)).path @property def LATEX_ENABLED(self): return bool(self.XELATEX_PATH) def __getattr__(self, name): try: return self.data[name] except KeyError: raise self._exc('no such setting: ' + name) def __setattr__(self, key, value): raise AttributeError('cannot change config at runtime') def __delattr__(self, key): raise AttributeError('cannot change config at runtime') #: The global Indico configuration config = IndicoConfig()
import logging
from operator import itemgetter
import asyncio
import time
from typing import Optional, Dict, Callable

from .streams import BinanceSocketManager
from .threaded_stream import ThreadedApiManager


class DepthCache(object):
    """Local order-book cache: price (string key) -> quantity (conv_type)."""

    def __init__(self, symbol, conv_type=float):
        """Initialise the DepthCache

        :param symbol: Symbol to create depth cache for
        :type symbol: string
        :param conv_type: Optional type to represent price, and amount, default is float.
        :type conv_type: function.

        """
        self.symbol = symbol
        self._bids = {}
        self._asks = {}
        self.update_time = None
        self.conv_type = conv_type
        self._log = logging.getLogger(__name__)

    def add_bid(self, bid):
        """Add a bid to the cache

        :param bid: ``[price, quantity]`` pair as received from the API
        :return:

        """
        self._bids[bid[0]] = self.conv_type(bid[1])
        # a quantity of "0.00000000" means the price level was removed
        if bid[1] == "0.00000000":
            del self._bids[bid[0]]

    def add_ask(self, ask):
        """Add an ask to the cache

        :param ask: ``[price, quantity]`` pair as received from the API
        :return:

        """
        self._asks[ask[0]] = self.conv_type(ask[1])
        # a quantity of "0.00000000" means the price level was removed
        if ask[1] == "0.00000000":
            del self._asks[ask[0]]

    def get_bids(self):
        """Get the current bids

        :return: list of bids with price and quantity as conv_type

        .. code-block:: python

            [
                [
                    0.0001946,  # Price
                    45.0        # Quantity
                ],
                [
                    0.00019459,
                    2384.0
                ],
                [
                    0.00019158,
                    5219.0
                ],
                [
                    0.00019157,
                    1180.0
                ],
                [
                    0.00019082,
                    287.0
                ]
            ]

        """
        return DepthCache.sort_depth(self._bids, reverse=True, conv_type=self.conv_type)

    def get_asks(self):
        """Get the current asks

        :return: list of asks with price and quantity as conv_type.

        .. code-block:: python

            [
                [
                    0.0001955,  # Price
                    57.0        # Quantity
                ],
                [
                    0.00019699,
                    778.0
                ],
                [
                    0.000197,
                    64.0
                ],
                [
                    0.00019709,
                    1130.0
                ],
                [
                    0.0001971,
                    385.0
                ]
            ]

        """
        return DepthCache.sort_depth(self._asks, reverse=False, conv_type=self.conv_type)

    @staticmethod
    def sort_depth(vals, reverse=False, conv_type=float):
        """Sort bids or asks by price

        Accepts either the internal dict form or a raw list of
        ``[price, quantity]`` pairs (the futures cache stores lists).
        """
        if isinstance(vals, dict):
            lst = [[conv_type(price), conv_type(quantity)] for price, quantity in vals.items()]
        elif isinstance(vals, list):
            lst = [[conv_type(price), conv_type(quantity)] for price, quantity in vals]
        else:
            raise ValueError(f'Unknown order book depth data type: {type(vals)}')
        lst = sorted(lst, key=itemgetter(0), reverse=reverse)
        return lst


class BaseDepthCacheManager:
    """Common machinery: owns a DepthCache, a websocket and a refresh timer."""

    DEFAULT_REFRESH = 60 * 30  # 30 minutes
    TIMEOUT = 60  # seconds to wait for a websocket message in recv()

    def __init__(self, client, symbol, loop=None, refresh_interval=None, bm=None, limit=10, conv_type=float):
        """Create a DepthCacheManager instance

        :param client: Binance API client
        :type client: binance.Client
        :param loop:
        :type loop:
        :param symbol: Symbol to create depth cache for
        :type symbol: string
        :param refresh_interval: Optional number of seconds between cache refresh, use 0 or None to disable
        :type refresh_interval: int
        :param bm: Optional BinanceSocketManager
        :type bm: BinanceSocketManager
        :param limit: Optional number of orders to get from orderbook
        :type limit: int
        :param conv_type: Optional type to represent price, and amount, default is float.
        :type conv_type: function.

        """
        self._client = client
        self._depth_cache = None
        self._loop = loop or asyncio.get_event_loop()
        self._symbol = symbol
        self._limit = limit
        self._last_update_id = None
        self._bm = bm or BinanceSocketManager(self._client, self._loop)
        self._refresh_interval = refresh_interval or self.DEFAULT_REFRESH
        self._conn_key = None
        self._conv_type = conv_type
        self._log = logging.getLogger(__name__)

    async def __aenter__(self):
        # build the cache and create the socket object concurrently, then
        # actually open the socket connection
        await asyncio.gather(
            self._init_cache(),
            self._start_socket()
        )
        await self._socket.__aenter__()
        return self

    async def __aexit__(self, *args, **kwargs):
        await self._socket.__aexit__(*args, **kwargs)

    async def recv(self):
        """Wait for the next depth-cache update; returns None on socket error."""
        dc = None
        while not dc:
            try:
                res = await asyncio.wait_for(self._socket.recv(), timeout=self.TIMEOUT)
            except Exception as e:
                # NOTE(review): broad catch — timeouts and real errors are both
                # only logged as warnings and the loop retries
                self._log.warning(e)
            else:
                dc = await self._depth_event(res)
        return dc

    async def _init_cache(self):
        """Initialise the depth cache calling REST endpoint

        :return:
        """
        # initialise or clear depth cache
        self._depth_cache = DepthCache(self._symbol, conv_type=self._conv_type)

        # set a time to refresh the depth cache
        if self._refresh_interval:
            self._refresh_time = int(time.time()) + self._refresh_interval

    async def _start_socket(self):
        """Start the depth cache socket

        :return:
        """
        self._socket = self._get_socket()

    def _get_socket(self):
        # subclasses return the appropriate socket from the socket manager
        raise NotImplementedError

    async def _depth_event(self, msg):
        """Handle a depth event

        :param msg:
        :return:

        """
        if not msg:
            return None

        if 'e' in msg and msg['e'] == 'error':
            # close the socket
            await self.close()

            # notify the user by returning a None value
            return None

        return await self._process_depth_message(msg)

    async def _process_depth_message(self, msg):
        """Process a depth event message.

        :param msg: Depth event message.
        :return:

        """
        # add any bid or ask values
        self._apply_orders(msg)

        # call the callback with the updated depth cache
        res = self._depth_cache

        # after processing event see if we need to refresh the depth cache
        if self._refresh_interval and int(time.time()) > self._refresh_time:
            await self._init_cache()
        return res

    def _apply_orders(self, msg):
        # accepts both websocket keys ('b'/'a') and REST keys ('bids'/'asks')
        for bid in msg.get('b', []) + msg.get('bids', []):
            self._depth_cache.add_bid(bid)
        for ask in msg.get('a', []) + msg.get('asks', []):
            self._depth_cache.add_ask(ask)

        # keeping update time
        self._depth_cache.update_time = msg.get('E') or msg.get('lastUpdateId')

    def get_depth_cache(self):
        """Get the current depth cache

        :return: DepthCache object

        """
        return self._depth_cache

    async def close(self):
        """Close the open socket for this manager

        :return:
        """
        # NOTE(review): only drops the cache reference; the socket itself is
        # closed via __aexit__ — confirm this is intentional
        self._depth_cache = None

    def get_symbol(self):
        """Get the symbol

        :return: symbol

        """
        return self._symbol


class DepthCacheManager(BaseDepthCacheManager):
    """Spot depth cache: REST snapshot + sequenced websocket diffs."""

    def __init__(
        self, client, symbol, loop=None, refresh_interval=None, bm=None, limit=500, conv_type=float, ws_interval=None
    ):
        """Initialise the DepthCacheManager

        :param client: Binance API client
        :type client: binance.Client
        :param loop: asyncio loop
        :param symbol: Symbol to create depth cache for
        :type symbol: string
        :param refresh_interval: Optional number of seconds between cache refresh, use 0 or None to disable
        :type refresh_interval: int
        :param limit: Optional number of orders to get from orderbook
        :type limit: int
        :param conv_type: Optional type to represent price, and amount, default is float.
        :type conv_type: function.
        :param ws_interval: Optional interval for updates on websocket, default None.
                            If not set, updates happen every second. Must be 0, None (1s) or 100 (100ms).
        :type ws_interval: int

        """
        super().__init__(client, symbol, loop, refresh_interval, bm, limit, conv_type)
        self._ws_interval = ws_interval

    async def _init_cache(self):
        """Initialise the depth cache calling REST endpoint

        :return:
        """
        self._last_update_id = None
        self._depth_message_buffer = []

        res = await self._client.get_order_book(symbol=self._symbol, limit=self._limit)

        # initialise or clear depth cache
        await super()._init_cache()

        # process bid and asks from the order book
        self._apply_orders(res)
        # NOTE(review): the loops below re-add the same levels _apply_orders
        # already applied; idempotent (dict keyed by price) but redundant
        for bid in res['bids']:
            self._depth_cache.add_bid(bid)
        for ask in res['asks']:
            self._depth_cache.add_ask(ask)

        # set first update id
        self._last_update_id = res['lastUpdateId']

        # Apply any updates from the websocket
        for msg in self._depth_message_buffer:
            await self._process_depth_message(msg)

        # clear the depth buffer
        self._depth_message_buffer = []

    async def _start_socket(self):
        """Start the depth cache socket

        :return:
        """
        # make sure the buffer exists before messages can arrive
        if not getattr(self, '_depth_message_buffer', None):
            self._depth_message_buffer = []

        await super()._start_socket()

    def _get_socket(self):
        return self._bm.depth_socket(self._symbol, interval=self._ws_interval)

    async def _process_depth_message(self, msg):
        """Process a depth event message.

        :param msg: Depth event message.
        :return:

        """
        if self._last_update_id is None:
            # Initial depth snapshot fetch not yet performed, buffer messages
            self._depth_message_buffer.append(msg)
            return

        # 'u' = final update id in event, 'U' = first update id in event
        if msg['u'] <= self._last_update_id:
            # ignore any updates before the initial update id
            return
        elif msg['U'] != self._last_update_id + 1:
            # if not buffered check we get sequential updates
            # otherwise init cache again
            await self._init_cache()

        # add any bid or ask values
        self._apply_orders(msg)

        # call the callback with the updated depth cache
        res = self._depth_cache

        self._last_update_id = msg['u']

        # after processing event see if we need to refresh the depth cache
        if self._refresh_interval and int(time.time()) > self._refresh_time:
            await self._init_cache()

        return res


class FuturesDepthCacheManager(BaseDepthCacheManager):
    """Futures depth cache: each event carries the full book, not a diff."""

    async def _process_depth_message(self, msg):
        """Process a depth event message.

        :param msg: Depth event message.
        :return:

        """
        # futures stream wraps the payload in a 'data' envelope
        msg = msg.get('data')
        return await super()._process_depth_message(msg)

    def _apply_orders(self, msg):
        # NOTE(review): replaces the cache's dicts with raw [price, qty]
        # lists; DepthCache.sort_depth handles both forms
        self._depth_cache._bids = msg.get('b', [])
        self._depth_cache._asks = msg.get('a', [])

        # keeping update time
        self._depth_cache.update_time = msg.get('E') or msg.get('lastUpdateId')

    def _get_socket(self):
        sock = self._bm.futures_depth_socket(self._symbol)
        return sock


class OptionsDepthCacheManager(BaseDepthCacheManager):
    """Options depth cache: same processing as the base class."""

    def _get_socket(self):
        return self._bm.options_depth_socket(self._symbol)


class ThreadedDepthCacheManager(ThreadedApiManager):
    """Thread-based wrapper that runs depth cache managers on a background loop."""

    def __init__(
        self, api_key: Optional[str] = None, api_secret: Optional[str] = None,
        requests_params: Dict[str, str] = None, tld: str = 'com',
        testnet: bool = False
    ):
        super().__init__(api_key, api_secret, requests_params, tld, testnet)

    def _start_depth_cache(
        self, dcm_class, callback: Callable, symbol: str,
        refresh_interval=None, bm=None, limit=10, conv_type=float, **kwargs
    ) -> str:
        # busy-wait until the background thread has created the client
        while not self._client:
            time.sleep(0.01)

        dcm = dcm_class(
            client=self._client,
            symbol=symbol,
            loop=self._loop,
            refresh_interval=refresh_interval,
            bm=bm,
            limit=limit,
            conv_type=conv_type,
            **kwargs
        )
        # the path doubles as the listener key returned to the caller
        path = symbol.lower() + '@depth' + str(limit)
        self._socket_running[path] = True
        self._loop.call_soon(asyncio.create_task, self.start_listener(dcm, path, callback))
        return path

    def start_depth_cache(
        self, callback: Callable, symbol: str, refresh_interval=None, bm=None, limit=10, conv_type=float,
        ws_interval=0
    ) -> str:
        return self._start_depth_cache(
            dcm_class=DepthCacheManager,
            callback=callback,
            symbol=symbol,
            refresh_interval=refresh_interval,
            bm=bm,
            limit=limit,
            conv_type=conv_type,
            ws_interval=ws_interval
        )

    def start_futures_depth_socket(
        self, callback: Callable, symbol: str, refresh_interval=None, bm=None, limit=10, conv_type=float
    ) -> str:
        return self._start_depth_cache(
            dcm_class=FuturesDepthCacheManager,
            callback=callback,
            symbol=symbol,
            refresh_interval=refresh_interval,
            bm=bm,
            limit=limit,
            conv_type=conv_type
        )

    def start_options_depth_socket(
        self, callback: Callable, symbol: str, refresh_interval=None, bm=None, limit=10, conv_type=float
    ) -> str:
        return self._start_depth_cache(
            dcm_class=OptionsDepthCacheManager,
            callback=callback,
            symbol=symbol,
            refresh_interval=refresh_interval,
            bm=bm,
            limit=limit,
            conv_type=conv_type
        )
# -*- coding: utf-8 -*-
# NOTE: Python 2 module (httplib, unicode string handling via coding cookie).

import datetime
import httplib as http

from flask import request
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from modularodm.exceptions import ValidationValueError

import framework.auth
from framework.auth import cas
from framework import forms, status
from framework.flask import redirect  # VOL-aware redirect
from framework.auth import exceptions
from framework.exceptions import HTTPError
from framework.auth import (logout, get_user, DuplicateEmailError)
from framework.auth.decorators import collect_auth, must_be_logged_in
from framework.auth.forms import (
    MergeAccountForm, RegistrationForm,
    ResendConfirmationForm, ResetPasswordForm, ForgotPasswordForm
)

from website import settings
from website import mails
from website import language
from website import security
from website.models import User
from website.util import web_url_for
from website.util.sanitize import strip_html


@collect_auth
def reset_password(auth, **kwargs):
    """Show/process the password reset form reached via an emailed link.

    The verification key from the URL identifies the user; on success the
    user is redirected to CAS with a fresh one-time verification key.
    """
    # a logged-in user must be logged out before resetting a password
    if auth.logged_in:
        return auth_logout(redirect_url=request.url)

    verification_key = kwargs['verification_key']
    form = ResetPasswordForm(request.form)

    user_obj = get_user(verification_key=verification_key)
    if not user_obj:
        error_data = {'message_short': 'Invalid url.',
                      'message_long': 'The verification key in the URL is invalid or '
                                      'has expired.'}
        raise HTTPError(400, data=error_data)

    if request.method == 'POST' and form.validate():
        # new random verification key, allows CAS to authenticate the user w/o password one time only.
        user_obj.verification_key = security.random_string(20)
        user_obj.set_password(form.password.data)
        user_obj.save()
        status.push_status_message('Password reset', 'success')
        # Redirect to CAS and authenticate the user with a verification key.
        return redirect(cas.get_login_url(
            web_url_for('user_account', _absolute=True),
            auto=True,
            username=user_obj.username,
            verification_key=user_obj.verification_key
        ))
    forms.push_errors_to_status(form.errors)
    return {
        'verification_key': verification_key,
    }


def forgot_password_post():
    """Attempt to send user password reset or return respective error.
    """
    form = ForgotPasswordForm(request.form, prefix='forgot_password')

    if form.validate():
        email = form.email.data
        user_obj = get_user(email=email)
        if user_obj:
            user_obj.verification_key = security.random_string(20)
            user_obj.save()
            reset_link = "http://{0}{1}".format(
                request.host,
                web_url_for(
                    'reset_password',
                    verification_key=user_obj.verification_key
                )
            )
            mails.send_mail(
                to_addr=email,
                mail=mails.FORGOT_PASSWORD,
                reset_link=reset_link
            )
        # Same message whether the account exists or not, to avoid
        # disclosing which email addresses are registered
        status.push_status_message(
            ('If there is an OSF account associated with {0}, an email with instructions on how to reset '
             'the OSF password has been sent to {0}. If you do not receive an email and believe you should '
             'have, please contact OSF Support. ').format(email), 'success')

    forms.push_errors_to_status(form.errors)
    return auth_login(forgot_password_form=form)


@collect_auth
def forgot_password_get(auth, *args, **kwargs):
    """Return forgot password page upon.
    """
    if auth.logged_in:
        return redirect(web_url_for('dashboard'))
    return {}


###############################################################################
# Log in
###############################################################################

# TODO: Rewrite async
@collect_auth
def auth_login(auth, **kwargs):
    """If GET request, show login page. If POST, attempt to log user in if
    login form passsed; else send forgot password email.

    """
    next_url = request.args.get('next')
    if auth.logged_in:
        if not request.args.get('logout'):
            if next_url:
                return redirect(next_url)
            return redirect(web_url_for('dashboard'))
        # redirect user to CAS for logout, return here w/o authentication
        return auth_logout(redirect_url=request.url)
    if kwargs.get('first', False):
        status.push_status_message('You may now log in', 'info')

    status_message = request.args.get('status', '')
    if status_message == 'expired':
        status.push_status_message('The private link you used is expired.')

    code = http.OK
    if next_url:
        status.push_status_message(language.MUST_LOGIN)
        # Don't raise error if user is being logged out
        if not request.args.get('logout'):
            code = http.UNAUTHORIZED
    # set login_url to form action, upon successful authentication specifically w/o logout=True,
    # allows for next to be followed or a redirect to the dashboard.
    redirect_url = web_url_for('auth_login', next=next_url, _absolute=True)
    login_url = cas.get_login_url(redirect_url, auto=True)
    return {'login_url': login_url}, code


def auth_logout(redirect_url=None):
    """Log out and delete cookie.
    """
    redirect_url = redirect_url or request.args.get('redirect_url') or web_url_for('goodbye', _absolute=True)
    logout()
    # 'reauth' makes CAS immediately re-prompt for login after logging out
    if 'reauth' in request.args:
        cas_endpoint = cas.get_login_url(redirect_url)
    else:
        cas_endpoint = cas.get_logout_url(redirect_url)
    resp = redirect(cas_endpoint)
    resp.delete_cookie(settings.COOKIE_NAME)
    return resp


@collect_auth
def confirm_email_get(token, auth=None, **kwargs):
    """View for email confirmation links.
    Authenticates and redirects to user settings page if confirmation is
    successful, otherwise shows an "Expired Link" error.

    methods: GET
    """
    user = User.load(kwargs['uid'])
    is_merge = 'confirm_merge' in request.args
    # NOTE(review): `user.date_confirmed` is read before the `user is None`
    # check below — a bad uid raises AttributeError instead of 404
    is_initial_confirmation = not user.date_confirmed

    if user is None:
        raise HTTPError(http.NOT_FOUND)

    if auth and auth.user and auth.user in (user, user.merged_by):
        if not is_merge:
            status.push_status_message(language.WELCOME_MESSAGE, 'default', jumbotron=True)
            # Go to dashboard
            return redirect(web_url_for('dashboard'))

        status.push_status_message(language.MERGE_COMPLETE, 'success')
        return redirect(web_url_for('user_account'))

    try:
        user.confirm_email(token, merge=is_merge)
    except exceptions.EmailConfirmTokenError as e:
        raise HTTPError(http.BAD_REQUEST, data={
            'message_short': e.message_short,
            'message_long': e.message_long
        })

    if is_initial_confirmation:
        user.date_last_login = datetime.datetime.utcnow()
        user.save()

        # Send out our welcome message
        mails.send_mail(
            to_addr=user.username,
            mail=mails.WELCOME,
            mimetype='html',
            user=user
        )

    # Redirect to CAS and authenticate the user with a verification key.
    user.verification_key = security.random_string(20)
    user.save()

    return redirect(cas.get_login_url(
        request.url,
        auto=True,
        username=user.username,
        verification_key=user.verification_key
    ))


def send_confirm_email(user, email):
    """Sends a confirmation email to `user` to a given email.

    :raises: KeyError if user does not have a confirmation token for the given
        email.
    """
    confirmation_url = user.get_confirmation_url(
        email,
        external=True,
        force=True,
    )
    # if the address already belongs to another account, offer to merge
    try:
        merge_target = User.find_one(Q('emails', 'eq', email))
    except NoResultsFound:
        merge_target = None
    mails.send_mail(
        email,
        mails.CONFIRM_MERGE if merge_target else mails.CONFIRM_EMAIL,
        'plain',
        user=user,
        confirmation_url=confirmation_url,
        email=email,
        merge_target=merge_target,
    )


def register_user(**kwargs):
    """Register new user account.

    :param-json str email1:
    :param-json str email2:
    :param-json str password:
    :param-json str fullName:

    :raises: HTTPError(http.BAD_REQUEST) if validation fails or user already
        exists
    """
    # Verify email address match
    json_data = request.get_json()
    if str(json_data['email1']).lower() != str(json_data['email2']).lower():
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(message_long='Email addresses must match.')
        )
    try:
        full_name = request.json['fullName']
        full_name = strip_html(full_name)

        user = framework.auth.register_unconfirmed(
            request.json['email1'],
            request.json['password'],
            full_name,
        )
        framework.auth.signals.user_registered.send(user)
    except (ValidationValueError, DuplicateEmailError):
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(
                message_long=language.ALREADY_REGISTERED.format(
                    email=request.json['email1']
                )
            )
        )

    if settings.CONFIRM_REGISTRATIONS_BY_EMAIL:
        send_confirm_email(user, email=user.username)
        message = language.REGISTRATION_SUCCESS.format(email=user.username)
        return {'message': message}
    else:
        return {'message': 'You may now log in.'}


# TODO: Remove me
def auth_register_post():
    """Legacy form-based registration endpoint (see TODO above)."""
    if not settings.ALLOW_REGISTRATION:
        status.push_status_message(language.REGISTRATION_UNAVAILABLE)
        return redirect('/')
    form = RegistrationForm(request.form, prefix='register')

    # Process form
    if form.validate():
        try:
            user = framework.auth.register_unconfirmed(
                form.username.data,
                form.password.data,
                form.fullname.data)
            framework.auth.signals.user_registered.send(user)
        except (ValidationValueError, DuplicateEmailError):
            status.push_status_message(
                language.ALREADY_REGISTERED.format(email=form.username.data))
            return auth_login()
        if user:
            if settings.CONFIRM_REGISTRATIONS_BY_EMAIL:
                send_confirm_email(user, email=user.username)
                message = language.REGISTRATION_SUCCESS.format(email=user.username)
                status.push_status_message(message, 'success')
                return auth_login()
            else:
                return redirect('/login/first/')
    else:
        forms.push_errors_to_status(form.errors)
        return auth_login()


def merge_user_get(**kwargs):
    '''Web view for merging an account. Renders the form for confirmation.
    '''
    return forms.utils.jsonify(MergeAccountForm())


def resend_confirmation():
    """View for resending an email confirmation email.
    """
    form = ResendConfirmationForm(request.form)
    if request.method == 'POST':
        if form.validate():
            clean_email = form.email.data
            user = get_user(email=clean_email)
            if not user:
                return {'form': form}
            try:
                send_confirm_email(user, clean_email)
            except KeyError:  # already confirmed, redirect to dashboard
                status_message = 'Email has already been confirmed.'
                type_ = 'warning'
            else:
                status_message = 'Resent email to <em>{0}</em>'.format(clean_email)
                type_ = 'success'
            status.push_status_message(status_message, type_)
        else:
            forms.push_errors_to_status(form.errors)
    # Don't go anywhere
    return {'form': form}


# TODO: shrink me
@must_be_logged_in
def merge_user_post(auth, **kwargs):
    '''View for merging an account. Takes either JSON or form data.

    Request data should include a "merged_username" and "merged_password"
    properties for the account to be merged in.
    '''
    master = auth.user
    if request.json:
        merged_username = request.json.get("merged_username")
        merged_password = request.json.get("merged_password")
    else:
        # form path additionally re-authenticates the master account
        form = MergeAccountForm(request.form)
        if not form.validate():
            forms.push_errors_to_status(form.errors)
            return merge_user_get(**kwargs)
        master_password = form.user_password.data
        if not master.check_password(master_password):
            status.push_status_message("Could not authenticate. Please check your username and password.")
            return merge_user_get(**kwargs)
        merged_username = form.merged_username.data
        merged_password = form.merged_password.data
    try:
        merged_user = User.find_one(Q("username", "eq", merged_username))
    except NoResultsFound:
        status.push_status_message("Could not find that user. Please check the username and password.")
        return merge_user_get(**kwargs)
    if master and merged_user:
        if merged_user.check_password(merged_password):
            master.merge_user(merged_user)
            master.save()
            if request.form:
                status.push_status_message("Successfully merged {0} with this account".format(merged_username),
                                           'success')
                return redirect("/settings/")
            return {"status": "success"}
        else:
            status.push_status_message("Could not find that user. Please check the username and password.")
            return merge_user_get(**kwargs)
    else:
        raise HTTPError(http.BAD_REQUEST)


# TODO: Is this used?
def auth_registerbeta():
    return redirect('/account')
#------------------------------------------------------------------------------ # pycparser: c_lexer.py # # CLexer class: lexer for the C language # # Copyright (C) 2008-2015, Eli Bendersky # License: BSD #------------------------------------------------------------------------------ import re import sys from .ply import lex from .ply.lex import TOKEN class CLexer(object): """ A lexer for the C language. After building it, set the input text with input(), and call token() to get new tokens. The public attribute filename can be set to an initial filaneme, but the lexer will update it upon #line directives. """ def __init__(self, error_func, on_lbrace_func, on_rbrace_func, type_lookup_func): """ Create a new Lexer. error_func: An error function. Will be called with an error message, line and column as arguments, in case of an error during lexing. on_lbrace_func, on_rbrace_func: Called when an LBRACE or RBRACE is encountered (likely to push/pop type_lookup_func's scope) type_lookup_func: A type lookup function. Given a string, it must return True IFF this string is a name of a type that was defined with a typedef earlier. """ self.error_func = error_func self.on_lbrace_func = on_lbrace_func self.on_rbrace_func = on_rbrace_func self.type_lookup_func = type_lookup_func self.filename = '' # Keeps track of the last token returned from self.token() self.last_token = None # Allow either "# line" or "# <num>" to support GCC's # cpp output # self.line_pattern = re.compile('([ \t]*line\W)|([ \t]*\d+)') self.pragma_pattern = re.compile('[ \t]*pragma\W') def build(self, **kwargs): """ Builds the lexer from the specification. Must be called after the lexer object is created. This method exists separately, because the PLY manual warns against calling lex.lex inside __init__ """ self.lexer = lex.lex(object=self, **kwargs) def reset_lineno(self): """ Resets the internal line number counter of the lexer. 
""" self.lexer.lineno = 1 def input(self, text): self.lexer.input(text) def token(self): self.last_token = self.lexer.token() return self.last_token def find_tok_column(self, token): """ Find the column of the token in its line. """ last_cr = self.lexer.lexdata.rfind('\n', 0, token.lexpos) return token.lexpos - last_cr ######################-- PRIVATE --###################### ## ## Internal auxiliary methods ## def _error(self, msg, token): location = self._make_tok_location(token) self.error_func(msg, location[0], location[1]) self.lexer.skip(1) def _make_tok_location(self, token): return (token.lineno, self.find_tok_column(token)) ## ## Reserved keywords ## keywords = ( '_BOOL', '_COMPLEX', 'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST', 'CONTINUE', 'DEFAULT', 'DO', 'DOUBLE', 'ELSE', 'ENUM', 'EXTERN', 'FLOAT', 'FOR', 'GOTO', 'IF', 'INLINE', 'INT', 'LONG', 'REGISTER', 'OFFSETOF', 'RESTRICT', 'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT', 'SWITCH', 'TYPEDEF', 'UNION', 'UNSIGNED', 'VOID', 'VOLATILE', 'WHILE', ) keyword_map = {} for keyword in keywords: if keyword == '_BOOL': keyword_map['_Bool'] = keyword elif keyword == '_COMPLEX': keyword_map['_Complex'] = keyword else: keyword_map[keyword.lower()] = keyword ## ## All the tokens recognized by the lexer ## tokens = keywords + ( # Identifiers 'ID', # Type identifiers (identifiers previously defined as # types with typedef) 'TYPEID', # constants 'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX', 'INT_CONST_BIN', 'FLOAT_CONST', 'HEX_FLOAT_CONST', 'CHAR_CONST', 'WCHAR_CONST', # String literals 'STRING_LITERAL', 'WSTRING_LITERAL', # Operators 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD', 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT', 'LOR', 'LAND', 'LNOT', 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE', # Assignment 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL', 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL', # Increment/decrement 'PLUSPLUS', 'MINUSMINUS', # Structure dereference (->) 
'ARROW', # Conditional operator (?) 'CONDOP', # Delimeters 'LPAREN', 'RPAREN', # ( ) 'LBRACKET', 'RBRACKET', # [ ] 'LBRACE', 'RBRACE', # { } 'COMMA', 'PERIOD', # . , 'SEMI', 'COLON', # ; : # Ellipsis (...) 'ELLIPSIS', # pre-processor 'PPHASH', # '#' ) ## ## Regexes for use in tokens ## ## # valid C identifiers (K&R2: A.2.3), plus '$' (supported by some compilers) identifier = r'[a-zA-Z_$][0-9a-zA-Z_$]*' hex_prefix = '0[xX]' hex_digits = '[0-9a-fA-F]+' bin_prefix = '0[bB]' bin_digits = '[01]+' # integer constants (K&R2: A.2.5.1) integer_suffix_opt = r'(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?' decimal_constant = '(0'+integer_suffix_opt+')|([1-9][0-9]*'+integer_suffix_opt+')' octal_constant = '0[0-7]*'+integer_suffix_opt hex_constant = hex_prefix+hex_digits+integer_suffix_opt bin_constant = bin_prefix+bin_digits+integer_suffix_opt bad_octal_constant = '0[0-7]*[89]' # character constants (K&R2: A.2.5.2) # Note: a-zA-Z and '.-~^_!=&;,' are allowed as escape chars to support #line # directives with Windows paths as filenames (..\..\dir\file) # For the same reason, decimal_escape allows all digit sequences. We want to # parse all correct code, even if it means to sometimes parse incorrect # code. 
# NOTE(review): this chunk is the interior of a PLY-based C lexer class
# (pycparser-style); the class header is above this chunk.  PLY discovers
# token rules by their ``t_``-prefixed names; *function* rules are tried in
# definition order, so the ordering below is significant.

    # character constants (K&R2: A.2.5.2)
    # Regex fragments for C escape sequences; composed by string
    # concatenation into the char/string-literal patterns below.
    simple_escape = r"""([a-zA-Z._~!=&\^\-\\?'"])"""
    decimal_escape = r"""(\d+)"""
    hex_escape = r"""(x[0-9a-fA-F]+)"""
    # Anything after a backslash that is NOT a recognized escape character.
    bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])"""

    escape_sequence = r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))'
    cconst_char = r"""([^'\\\n]|"""+escape_sequence+')'
    char_const = "'"+cconst_char+"'"
    wchar_const = 'L'+char_const
    # A quote opened but never closed on the same line.
    unmatched_quote = "('"+cconst_char+"*\\n)|('"+cconst_char+"*$)"
    bad_char_const = r"""('"""+cconst_char+"""[^'\n]+')|('')|('"""+bad_escape+r"""[^'\n]*')"""

    # string literals (K&R2: A.2.6)
    string_char = r"""([^"\\\n]|"""+escape_sequence+')'
    string_literal = '"'+string_char+'*"'
    wstring_literal = 'L'+string_literal
    # A string literal containing an invalid escape sequence.
    bad_string_literal = '"'+string_char+'*'+bad_escape+string_char+'*"'

    # floating constants (K&R2: A.2.5.3)
    exponent_part = r"""([eE][-+]?[0-9]+)"""
    fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)"""
    floating_constant = '(((('+fractional_constant+')'+exponent_part+'?)|([0-9]+'+exponent_part+'))[FfLl]?)'
    binary_exponent_part = r'''([pP][+-]?[0-9]+)'''
    # hex_digits / hex_prefix / decimal_constant / identifier are defined
    # earlier in the class (outside this chunk).
    hex_fractional_constant = '((('+hex_digits+r""")?\."""+hex_digits+')|('+hex_digits+r"""\.))"""
    hex_floating_constant = '('+hex_prefix+'('+hex_digits+'|'+hex_fractional_constant+')'+binary_exponent_part+'[FfLl]?)'

    ##
    ## Lexer states: used for preprocessor \n-terminated directives
    ##
    states = (
        # ppline: preprocessor line directives
        #
        ('ppline', 'exclusive'),

        # pppragma: pragma
        #
        ('pppragma', 'exclusive'),
    )

    def t_PPHASH(self, t):
        r'[ \t]*\#'
        # A '#' at line start switches into a preprocessor sub-state when it
        # introduces a #line or #pragma directive; otherwise it is emitted
        # as an ordinary PPHASH token.
        if self.line_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos):
            t.lexer.begin('ppline')
            # Reset the state collected by the ppline rules below.
            self.pp_line = self.pp_filename = None
        elif self.pragma_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos):
            t.lexer.begin('pppragma')
        else:
            t.type = 'PPHASH'
            return t

    ##
    ## Rules for the ppline state
    ##
    @TOKEN(string_literal)
    def t_ppline_FILENAME(self, t):
        # In a #line directive the filename may only follow the line number.
        if self.pp_line is None:
            self._error('filename before line number in #line', t)
        else:
            self.pp_filename = t.value.lstrip('"').rstrip('"')

    @TOKEN(decimal_constant)
    def t_ppline_LINE_NUMBER(self, t):
        if self.pp_line is None:
            self.pp_line = t.value
        else:
            # Ignore: GCC's cpp sometimes inserts a numeric flag
            # after the file name
            pass

    def t_ppline_NEWLINE(self, t):
        r'\n'
        # End of the #line directive: apply the collected line number and
        # optional filename, then return to normal lexing.
        if self.pp_line is None:
            self._error('line number missing in #line', t)
        else:
            self.lexer.lineno = int(self.pp_line)
            if self.pp_filename is not None:
                self.filename = self.pp_filename
        t.lexer.begin('INITIAL')

    def t_ppline_PPLINE(self, t):
        r'line'
        # The literal 'line' keyword itself carries no information.
        pass

    t_ppline_ignore = ' \t'

    def t_ppline_error(self, t):
        self._error('invalid #line directive', t)

    ##
    ## Rules for the pppragma state
    ##
    def t_pppragma_NEWLINE(self, t):
        r'\n'
        # Pragmas are \n-terminated; track the line and leave the state.
        t.lexer.lineno += 1
        t.lexer.begin('INITIAL')

    def t_pppragma_PPPRAGMA(self, t):
        r'pragma'
        pass

    # Everything inside a #pragma is skipped, not tokenized.
    t_pppragma_ignore = ' \t<>.-{}();=+-*/$%@&^~!?:,0123456789'

    @TOKEN(string_literal)
    def t_pppragma_STR(self, t):
        pass

    @TOKEN(identifier)
    def t_pppragma_ID(self, t):
        pass

    def t_pppragma_error(self, t):
        self._error('invalid #pragma directive', t)

    ##
    ## Rules for the normal state
    ##
    t_ignore = ' \t'

    # Newlines
    def t_NEWLINE(self, t):
        r'\n+'
        t.lexer.lineno += t.value.count("\n")

    # Operators
    t_PLUS = r'\+'
    t_MINUS = r'-'
    t_TIMES = r'\*'
    t_DIVIDE = r'/'
    t_MOD = r'%'
    t_OR = r'\|'
    t_AND = r'&'
    t_NOT = r'~'
    t_XOR = r'\^'
    t_LSHIFT = r'<<'
    t_RSHIFT = r'>>'
    t_LOR = r'\|\|'
    t_LAND = r'&&'
    t_LNOT = r'!'
    t_LT = r'<'
    t_GT = r'>'
    t_LE = r'<='
    t_GE = r'>='
    t_EQ = r'=='
    t_NE = r'!='

    # Assignment operators
    t_EQUALS = r'='
    t_TIMESEQUAL = r'\*='
    t_DIVEQUAL = r'/='
    t_MODEQUAL = r'%='
    t_PLUSEQUAL = r'\+='
    t_MINUSEQUAL = r'-='
    t_LSHIFTEQUAL = r'<<='
    t_RSHIFTEQUAL = r'>>='
    t_ANDEQUAL = r'&='
    t_OREQUAL = r'\|='
    t_XOREQUAL = r'\^='

    # Increment/decrement
    t_PLUSPLUS = r'\+\+'
    t_MINUSMINUS = r'--'

    # ->
    t_ARROW = r'->'

    # ?
    t_CONDOP = r'\?'

    # Delimiters
    t_LPAREN = r'\('
    t_RPAREN = r'\)'
    t_LBRACKET = r'\['
    t_RBRACKET = r'\]'
    t_COMMA = r','
    t_PERIOD = r'\.'
    t_SEMI = r';'
    t_COLON = r':'
    t_ELLIPSIS = r'\.\.\.'
    # Scope delimiters
    # To see why on_lbrace_func is needed, consider:
    #   typedef char TT;
    #   void foo(int TT) { TT = 10; }
    #   TT x = 5;
    # Outside the function, TT is a typedef, but inside (starting and ending
    # with the braces) it's a parameter.  The trouble begins with yacc's
    # lookahead token.  If we open a new scope in brace_open, then TT has
    # already been read and incorrectly interpreted as TYPEID.  So, we need
    # to open and close scopes from within the lexer.
    # Similar for the TT immediately outside the end of the function.
    #
    @TOKEN(r'\{')
    def t_LBRACE(self, t):
        # Callback supplied by the parser: opens a new typedef scope.
        self.on_lbrace_func()
        return t

    @TOKEN(r'\}')
    def t_RBRACE(self, t):
        # Callback supplied by the parser: closes the current typedef scope.
        self.on_rbrace_func()
        return t

    t_STRING_LITERAL = string_literal

    # The following floating and integer constants are defined as
    # functions to impose a strict order (otherwise, decimal
    # is placed before the others because its regex is longer,
    # and this is bad)
    #
    @TOKEN(floating_constant)
    def t_FLOAT_CONST(self, t):
        return t

    @TOKEN(hex_floating_constant)
    def t_HEX_FLOAT_CONST(self, t):
        return t

    @TOKEN(hex_constant)
    def t_INT_CONST_HEX(self, t):
        return t

    @TOKEN(bin_constant)
    def t_INT_CONST_BIN(self, t):
        return t

    @TOKEN(bad_octal_constant)
    def t_BAD_CONST_OCT(self, t):
        msg = "Invalid octal constant"
        self._error(msg, t)

    @TOKEN(octal_constant)
    def t_INT_CONST_OCT(self, t):
        return t

    @TOKEN(decimal_constant)
    def t_INT_CONST_DEC(self, t):
        return t

    # Must come before bad_char_const, to prevent it from
    # catching valid char constants as invalid
    #
    @TOKEN(char_const)
    def t_CHAR_CONST(self, t):
        return t

    @TOKEN(wchar_const)
    def t_WCHAR_CONST(self, t):
        return t

    @TOKEN(unmatched_quote)
    def t_UNMATCHED_QUOTE(self, t):
        msg = "Unmatched '"
        self._error(msg, t)

    @TOKEN(bad_char_const)
    def t_BAD_CHAR_CONST(self, t):
        msg = "Invalid char constant %s" % t.value
        self._error(msg, t)

    @TOKEN(wstring_literal)
    def t_WSTRING_LITERAL(self, t):
        return t

    # unmatched string literals are caught by the preprocessor

    @TOKEN(bad_string_literal)
    def t_BAD_STRING_LITERAL(self, t):
        msg = "String contains invalid escape code"
        self._error(msg, t)

    @TOKEN(identifier)
    def t_ID(self, t):
        # Classify the identifier: a C keyword, a typedef name known to the
        # parser (TYPEID), or a plain ID.
        t.type = self.keyword_map.get(t.value, "ID")
        if t.type == 'ID' and self.type_lookup_func(t.value):
            t.type = "TYPEID"
        return t

    def t_error(self, t):
        msg = 'Illegal character %s' % repr(t.value[0])
        self._error(msg, t)
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE(review): Python 2 code (``except BaseException, err``, StringIO).

import operator
import os
import mock
import unittest
from contextlib import contextmanager
from shutil import rmtree
from StringIO import StringIO
from tempfile import mkdtemp

from eventlet import spawn, Timeout, listen
import simplejson

from swift.common.swob import Request, HeaderKeyDict
import swift.container
from swift.container import server as container_server
from swift.common.utils import normalize_timestamp, mkdirs, public, replication
from test.unit import fake_http_connect


@contextmanager
def save_globals():
    """Save and restore the module-level http_connect hook around a test.

    Tests may monkeypatch ``swift.container.server.http_connect``; this
    context manager guarantees the original value is restored afterwards.
    """
    orig_http_connect = getattr(swift.container.server, 'http_connect', None)
    try:
        yield True
    finally:
        swift.container.server.http_connect = orig_http_connect


class TestContainerController(unittest.TestCase):
    """Tests for swift.container.server.ContainerController."""

    def setUp(self):
        """Create an on-disk device layout and a controller under test."""
        self.testdir = os.path.join(mkdtemp(),
                                    'tmp_test_object_server_ObjectController')
        mkdirs(self.testdir)
        rmtree(self.testdir)
        mkdirs(os.path.join(self.testdir, 'sda1'))
        mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
        self.controller = container_server.ContainerController(
            {'devices': self.testdir, 'mount_check': 'false'})

    def tearDown(self):
        """Remove the temporary device tree created in setUp."""
        rmtree(os.path.dirname(self.testdir), ignore_errors=1)

    def test_acl_container(self):
        # Ensure no acl by default
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        response = self.controller.HEAD(req)
        self.assert_(response.status.startswith('204'))
        self.assert_('x-container-read' not in response.headers)
        self.assert_('x-container-write' not in response.headers)
        # Ensure POSTing acls works
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': '1',
                                     'X-Container-Read': '.r:*',
                                     'X-Container-Write': 'account:user'})
        self.controller.POST(req)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        response = self.controller.HEAD(req)
        self.assert_(response.status.startswith('204'))
        self.assertEquals(response.headers.get('x-container-read'), '.r:*')
        self.assertEquals(response.headers.get('x-container-write'),
                          'account:user')
        # Ensure we can clear acls on POST
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': '3',
                                     'X-Container-Read': '',
                                     'X-Container-Write': ''})
        self.controller.POST(req)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        response = self.controller.HEAD(req)
        self.assert_(response.status.startswith('204'))
        self.assert_('x-container-read' not in response.headers)
        self.assert_('x-container-write' not in response.headers)
        # Ensure PUTing acls works
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': '4',
                                     'X-Container-Read': '.r:*',
                                     'X-Container-Write': 'account:user'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'HEAD'})
        response = self.controller.HEAD(req)
        self.assert_(response.status.startswith('204'))
        self.assertEquals(response.headers.get('x-container-read'), '.r:*')
        self.assertEquals(response.headers.get('x-container-write'),
                          'account:user')

    def test_HEAD(self):
        # HEAD reports byte/object usage that tracks object PUTs.
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        response = self.controller.HEAD(req)
        self.assert_(response.status.startswith('204'))
        self.assertEquals(int(response.headers['x-container-bytes-used']), 0)
        self.assertEquals(int(response.headers['x-container-object-count']), 0)
        req2 = Request.blank('/sda1/p/a/c/o',
                             environ={'HTTP_X_TIMESTAMP': '1',
                                      'HTTP_X_SIZE': 42,
                                      'HTTP_X_CONTENT_TYPE': 'text/plain',
                                      'HTTP_X_ETAG': 'x'})
        self.controller.PUT(req2)
        response = self.controller.HEAD(req)
        self.assertEquals(int(response.headers['x-container-bytes-used']), 42)
        self.assertEquals(int(response.headers['x-container-object-count']), 1)

    def test_HEAD_not_found(self):
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.status_int, 404)

    def test_HEAD_invalid_partition(self):
        req = Request.blank('/sda1/./a/c',
                            environ={'REQUEST_METHOD': 'HEAD',
                                     'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.status_int, 400)

    def test_HEAD_insufficient_storage(self):
        # With mount_check enabled (default), a missing device yields 507.
        self.controller = container_server.ContainerController(
            {'devices': self.testdir})
        req = Request.blank('/sda-null/p/a/c',
                            environ={'REQUEST_METHOD': 'HEAD',
                                     'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.status_int, 507)

    def test_HEAD_invalid_content_type(self):
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'},
                            headers={'Accept': 'application/plain'})
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.status_int, 406)

    def test_HEAD_invalid_format(self):
        format = '%D1%BD%8A9'  # invalid UTF-8; should be %E1%BD%8A9 (E -> D)
        req = Request.blank('/sda1/p/a/c?format=' + format,
                            environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 400)

    def test_PUT(self):
        # First PUT creates (201); a later PUT is accepted (202).
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '2'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 202)

    def test_PUT_obj_not_found(self):
        # Object PUT into a container that doesn't exist yet -> 404.
        req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': '1', 'X-Size': '0',
                                     'X-Content-Type': 'text/plain',
                                     'X-ETag': 'e'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 404)

    def test_PUT_GET_metadata(self):
        # Set metadata header
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': normalize_timestamp(1),
                                     'X-Container-Meta-Test': 'Value'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c')
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
        # Set another metadata header, ensuring old one doesn't disappear
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': normalize_timestamp(1),
                                     'X-Container-Meta-Test2': 'Value2'})
        resp = self.controller.POST(req)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c')
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
        self.assertEquals(resp.headers.get('x-container-meta-test2'), 'Value2')
        # Update metadata header
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': normalize_timestamp(3),
                                     'X-Container-Meta-Test': 'New Value'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 202)
        req = Request.blank('/sda1/p/a/c')
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'),
                          'New Value')
        # Send old update to metadata header
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': normalize_timestamp(2),
                                     'X-Container-Meta-Test': 'Old Value'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 202)
        req = Request.blank('/sda1/p/a/c')
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'),
                          'New Value')
        # Remove metadata header (by setting it to empty)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': normalize_timestamp(4),
                                     'X-Container-Meta-Test': ''})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 202)
        req = Request.blank('/sda1/p/a/c')
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 204)
        self.assert_('x-container-meta-test' not in resp.headers)

    def test_PUT_invalid_partition(self):
        req = Request.blank('/sda1/./a/c',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 400)

    def test_PUT_timestamp_not_float(self):
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': 'not-float'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 400)

    def test_PUT_insufficient_storage(self):
        self.controller = container_server.ContainerController(
            {'devices': self.testdir})
        req = Request.blank('/sda-null/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 507)

    def test_POST_HEAD_metadata(self):
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': normalize_timestamp(1)})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)
        # Set metadata header
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': normalize_timestamp(1),
                                     'X-Container-Meta-Test': 'Value'})
        resp = self.controller.POST(req)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
        # Update metadata header
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': normalize_timestamp(3),
                                     'X-Container-Meta-Test': 'New Value'})
        resp = self.controller.POST(req)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'),
                          'New Value')
        # Send old update to metadata header
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': normalize_timestamp(2),
                                     'X-Container-Meta-Test': 'Old Value'})
        resp = self.controller.POST(req)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'),
                          'New Value')
        # Remove metadata header (by setting it to empty)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': normalize_timestamp(4),
                                     'X-Container-Meta-Test': ''})
        resp = self.controller.POST(req)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.status_int, 204)
        self.assert_('x-container-meta-test' not in resp.headers)

    def test_POST_invalid_partition(self):
        req = Request.blank('/sda1/./a/c',
                            environ={'REQUEST_METHOD': 'POST',
                                     'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.POST(req)
        self.assertEquals(resp.status_int, 400)

    def test_POST_timestamp_not_float(self):
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': 'not-float'})
        resp = self.controller.POST(req)
        self.assertEquals(resp.status_int, 400)

    def test_POST_insufficient_storage(self):
        self.controller = container_server.ContainerController(
            {'devices': self.testdir})
        req = Request.blank('/sda-null/p/a/c',
                            environ={'REQUEST_METHOD': 'POST',
                                     'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.POST(req)
        self.assertEquals(resp.status_int, 507)

    def test_POST_invalid_container_sync_to(self):
        # A bare IP is not a valid sync-to URL.
        self.controller = container_server.ContainerController(
            {'devices': self.testdir})
        req = Request.blank('/sda-null/p/a/c',
                            environ={'REQUEST_METHOD': 'POST',
                                     'HTTP_X_TIMESTAMP': '1'},
                            headers={'x-container-sync-to': '192.168.0.1'})
        resp = self.controller.POST(req)
        self.assertEquals(resp.status_int, 400)

    def test_POST_after_DELETE_not_found(self):
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': '1'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'X-Timestamp': '2'})
        self.controller.DELETE(req)
        req = Request.blank('/sda1/p/a/c/',
                            environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': '3'})
        resp = self.controller.POST(req)
        self.assertEquals(resp.status_int, 404)

    def test_DELETE_obj_not_found(self):
        req = Request.blank('/sda1/p/a/c/o',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'X-Timestamp': '1'})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 404)

    def test_DELETE_container_not_found(self):
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'DELETE',
                                     'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 404)

    def test_PUT_utf8(self):
        snowman = u'\u2603'
        container_name = snowman.encode('utf-8')
        req = Request.blank('/sda1/p/a/%s' % container_name,
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)

    def test_account_update_mismatched_host_device(self):
        # Host/device header lists of different lengths are rejected.
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '1'},
                            headers={'X-Timestamp': '0000000001.00000',
                                     'X-Account-Host': '127.0.0.1:0',
                                     'X-Account-Partition': '123',
                                     'X-Account-Device': 'sda1,sda2'})
        broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
        resp = self.controller.account_update(req, 'a', 'c', broker)
        self.assertEquals(resp.status_int, 400)

    def test_account_update_account_override_deleted(self):
        bindsock = listen(('127.0.0.1', 0))
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '1'},
                            headers={'X-Timestamp': '0000000001.00000',
                                     'X-Account-Host': '%s:%s' %
                                     bindsock.getsockname(),
                                     'X-Account-Partition': '123',
                                     'X-Account-Device': 'sda1',
                                     'X-Account-Override-Deleted': 'yes'})
        with save_globals():
            new_connect = fake_http_connect(200, count=123)
            swift.container.server.http_connect = new_connect
            resp = self.controller.PUT(req)
            self.assertEquals(resp.status_int, 201)

    def test_PUT_account_update(self):
        bindsock = listen(('127.0.0.1', 0))

        def accept(return_code, expected_timestamp):
            # Play the account server: accept one connection, answer with
            # return_code and verify the forwarded PUT's timestamp header.
            # Any assertion error is returned (not raised) so the main
            # greenthread can re-raise it.
            try:
                with Timeout(3):
                    sock, addr = bindsock.accept()
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                              return_code)
                    out.flush()
                    self.assertEquals(inc.readline(),
                                      'PUT /sda1/123/a/c HTTP/1.1\r\n')
                    headers = {}
                    line = inc.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0].lower()] = \
                            line.split(':')[1].strip()
                        line = inc.readline()
                    self.assertEquals(headers['x-put-timestamp'],
                                      expected_timestamp)
            except BaseException, err:
                return err
            return None

        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': '0000000001.00000',
                                     'X-Account-Host': '%s:%s' %
                                     bindsock.getsockname(),
                                     'X-Account-Partition': '123',
                                     'X-Account-Device': 'sda1'})
        event = spawn(accept, 201, '0000000001.00000')
        try:
            with Timeout(3):
                resp = self.controller.PUT(req)
                self.assertEquals(resp.status_int, 201)
        finally:
            err = event.wait()
            if err:
                raise Exception(err)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'X-Timestamp': '2'})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': '0000000003.00000',
                                     'X-Account-Host': '%s:%s' %
                                     bindsock.getsockname(),
                                     'X-Account-Partition': '123',
                                     'X-Account-Device': 'sda1'})
        event = spawn(accept, 404, '0000000003.00000')
        try:
            with Timeout(3):
                resp = self.controller.PUT(req)
                self.assertEquals(resp.status_int, 404)
        finally:
            err = event.wait()
            if err:
                raise Exception(err)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': '0000000005.00000',
                                     'X-Account-Host': '%s:%s' %
                                     bindsock.getsockname(),
                                     'X-Account-Partition': '123',
                                     'X-Account-Device': 'sda1'})
        event = spawn(accept, 503, '0000000005.00000')
        got_exc = False
        try:
            with Timeout(3):
                resp = self.controller.PUT(req)
        except BaseException, err:
            got_exc = True
        finally:
            err = event.wait()
            if err:
                raise Exception(err)
        # A 503 from the account server must not raise in the controller.
        self.assert_(not got_exc)

    def test_PUT_reset_container_sync(self):
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'x-timestamp': '1',
                                     'x-container-sync-to':
                                     'http://127.0.0.1:12345/v1/a/c'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)
        db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
        info = db.get_info()
        self.assertEquals(info['x_container_sync_point1'], -1)
        self.assertEquals(info['x_container_sync_point2'], -1)
        db.set_x_container_sync_points(123, 456)
        info = db.get_info()
        self.assertEquals(info['x_container_sync_point1'], 123)
        self.assertEquals(info['x_container_sync_point2'], 456)
        # Set to same value
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'x-timestamp': '1',
                                     'x-container-sync-to':
                                     'http://127.0.0.1:12345/v1/a/c'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 202)
        db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
        info = db.get_info()
        self.assertEquals(info['x_container_sync_point1'], 123)
        self.assertEquals(info['x_container_sync_point2'], 456)
        # Set to new value
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'x-timestamp': '1',
                                     'x-container-sync-to':
                                     'http://127.0.0.1:12345/v1/a/c2'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 202)
        db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
        info = db.get_info()
        # Changing the sync-to target resets the sync points.
        self.assertEquals(info['x_container_sync_point1'], -1)
        self.assertEquals(info['x_container_sync_point2'], -1)

    def test_POST_reset_container_sync(self):
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'x-timestamp': '1',
                                     'x-container-sync-to':
                                     'http://127.0.0.1:12345/v1/a/c'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)
        db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
        info = db.get_info()
        self.assertEquals(info['x_container_sync_point1'], -1)
        self.assertEquals(info['x_container_sync_point2'], -1)
        db.set_x_container_sync_points(123, 456)
        info = db.get_info()
        self.assertEquals(info['x_container_sync_point1'], 123)
        self.assertEquals(info['x_container_sync_point2'], 456)
        # Set to same value
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'x-timestamp': '1',
                                     'x-container-sync-to':
                                     'http://127.0.0.1:12345/v1/a/c'})
        resp = self.controller.POST(req)
        self.assertEquals(resp.status_int, 204)
        db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
        info = db.get_info()
        self.assertEquals(info['x_container_sync_point1'], 123)
        self.assertEquals(info['x_container_sync_point2'], 456)
        # Set to new value
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'x-timestamp': '1',
                                     'x-container-sync-to':
                                     'http://127.0.0.1:12345/v1/a/c2'})
        resp = self.controller.POST(req)
        self.assertEquals(resp.status_int, 204)
        db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
        info = db.get_info()
        # Changing the sync-to target resets the sync points.
        self.assertEquals(info['x_container_sync_point1'], -1)
        self.assertEquals(info['x_container_sync_point2'], -1)

    def test_DELETE(self):
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': '1'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'X-Timestamp': '2'})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'},
                            headers={'X-Timestamp': '3'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 404)

    def test_DELETE_not_found(self):
        # Even if the container wasn't previously heard of, the container
        # server will accept the delete and replicate it to where it belongs
        # later.
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'DELETE',
                                     'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 404)

    def test_DELETE_object(self):
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': '2'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c/o',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0',
                                     'HTTP_X_SIZE': 1,
                                     'HTTP_X_CONTENT_TYPE': 'text/plain',
                                     'HTTP_X_ETAG': 'x'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)
        # Deleting a non-empty container -> 409 Conflict.
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'X-Timestamp': '3'})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 409)
        req = Request.blank('/sda1/p/a/c/o',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'X-Timestamp': '4'})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'X-Timestamp': '5'})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'},
                            headers={'X-Timestamp': '6'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 404)

    def test_DELETE_account_update(self):
        bindsock = listen(('127.0.0.1', 0))

        def accept(return_code, expected_timestamp):
            # Play the account server for the DELETE's account update;
            # verifies the forwarded x-delete-timestamp header.
            try:
                with Timeout(3):
                    sock, addr = bindsock.accept()
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                              return_code)
                    out.flush()
                    self.assertEquals(inc.readline(),
                                      'PUT /sda1/123/a/c HTTP/1.1\r\n')
                    headers = {}
                    line = inc.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0].lower()] = \
                            line.split(':')[1].strip()
                        line = inc.readline()
                    self.assertEquals(headers['x-delete-timestamp'],
                                      expected_timestamp)
            except BaseException, err:
                return err
            return None

        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': '1'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'X-Timestamp': '0000000002.00000',
                                     'X-Account-Host': '%s:%s' %
                                     bindsock.getsockname(),
                                     'X-Account-Partition': '123',
                                     'X-Account-Device': 'sda1'})
        event = spawn(accept, 204, '0000000002.00000')
        try:
            with Timeout(3):
                resp = self.controller.DELETE(req)
                self.assertEquals(resp.status_int, 204)
        finally:
            err = event.wait()
            if err:
                raise Exception(err)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '2'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'X-Timestamp': '0000000003.00000',
                                     'X-Account-Host': '%s:%s' %
                                     bindsock.getsockname(),
                                     'X-Account-Partition': '123',
                                     'X-Account-Device': 'sda1'})
        event = spawn(accept, 404, '0000000003.00000')
        try:
            with Timeout(3):
                resp = self.controller.DELETE(req)
                self.assertEquals(resp.status_int, 404)
        finally:
            err = event.wait()
            if err:
                raise Exception(err)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '4'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'X-Timestamp': '0000000005.00000',
                                     'X-Account-Host': '%s:%s' %
                                     bindsock.getsockname(),
                                     'X-Account-Partition': '123',
                                     'X-Account-Device': 'sda1'})
        event = spawn(accept, 503, '0000000005.00000')
        got_exc = False
        try:
            with Timeout(3):
                resp = self.controller.DELETE(req)
        except BaseException, err:
            got_exc = True
        finally:
            err = event.wait()
            if err:
                raise Exception(err)
        # A 503 from the account server must not raise in the controller.
        self.assert_(not got_exc)

    def test_DELETE_invalid_partition(self):
        req = Request.blank('/sda1/./a/c',
                            environ={'REQUEST_METHOD': 'DELETE',
                                     'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 400)

    def test_DELETE_timestamp_not_float(self):
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers={'X-Timestamp': 'not-float'})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 400)

    def test_DELETE_insufficient_storage(self):
        self.controller = container_server.ContainerController(
            {'devices': self.testdir})
        req = Request.blank('/sda-null/p/a/c',
                            environ={'REQUEST_METHOD': 'DELETE',
                                     'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 507)

    def test_GET_over_limit(self):
        # Listing limit above CONTAINER_LISTING_LIMIT -> 412.
        req = Request.blank('/sda1/p/a/c?limit=%d' %
                            (container_server.CONTAINER_LISTING_LIMIT + 1),
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 412)

    def test_GET_json(self):
        # make a container
        req = Request.blank('/sda1/p/a/jsonc',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0'})
        resp = self.controller.PUT(req)
        # test an empty container
        req = Request.blank('/sda1/p/a/jsonc?format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(simplejson.loads(resp.body), [])
        # fill the container
        for i in range(3):
            req = Request.blank('/sda1/p/a/jsonc/%s' % i,
                                environ={'REQUEST_METHOD': 'PUT',
                                         'HTTP_X_TIMESTAMP': '1',
                                         'HTTP_X_CONTENT_TYPE': 'text/plain',
                                         'HTTP_X_ETAG': 'x',
                                         'HTTP_X_SIZE': 0})
            resp = self.controller.PUT(req)
            self.assertEquals(resp.status_int, 201)
        # test format
        json_body = [{"name": "0",
                      "hash": "x",
                      "bytes": 0,
                      "content_type": "text/plain",
                      "last_modified": "1970-01-01T00:00:01.000000Z"},
                     {"name": "1",
                      "hash": "x",
                      "bytes": 0,
                      "content_type": "text/plain",
                      "last_modified": "1970-01-01T00:00:01.000000Z"},
                     {"name": "2",
                      "hash": "x",
                      "bytes": 0,
                      "content_type": "text/plain",
                      "last_modified": "1970-01-01T00:00:01.000000Z"}]
        req = Request.blank('/sda1/p/a/jsonc?format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.content_type, 'application/json')
        self.assertEquals(simplejson.loads(resp.body), json_body)
        self.assertEquals(resp.charset, 'utf-8')
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.content_type, 'application/json')
        # JSON must also be selectable purely via the Accept header.
        for accept in ('application/json', 'application/json;q=1.0,*/*;q=0.9',
                       '*/*;q=0.9,application/json;q=1.0', 'application/*'):
            req = Request.blank('/sda1/p/a/jsonc',
                                environ={'REQUEST_METHOD': 'GET'})
            req.accept = accept
            resp = self.controller.GET(req)
            self.assertEquals(simplejson.loads(resp.body), json_body,
                              'Invalid body for Accept: %s' % accept)
            self.assertEquals(resp.content_type, 'application/json',
                              'Invalid content_type for Accept: %s' % accept)
            resp = self.controller.HEAD(req)
            self.assertEquals(resp.content_type, 'application/json',
                              'Invalid content_type for Accept: %s' % accept)

    def test_GET_plain(self):
        # make a container
        req = Request.blank('/sda1/p/a/plainc',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0'})
        resp = self.controller.PUT(req)
        # test an empty container
        req = Request.blank('/sda1/p/a/plainc',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 204)
        # fill the container
        for i in range(3):
            req = Request.blank('/sda1/p/a/plainc/%s' % i,
                                environ={'REQUEST_METHOD': 'PUT',
                                         'HTTP_X_TIMESTAMP': '1',
                                         'HTTP_X_CONTENT_TYPE': 'text/plain',
                                         'HTTP_X_ETAG': 'x',
                                         'HTTP_X_SIZE': 0})
            resp = self.controller.PUT(req)
            self.assertEquals(resp.status_int, 201)
        plain_body = '0\n1\n2\n'
        req = Request.blank('/sda1/p/a/plainc',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.content_type, 'text/plain')
        self.assertEquals(resp.body, plain_body)
        self.assertEquals(resp.charset, 'utf-8')
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.content_type, 'text/plain')
        for accept in ('', 'text/plain', 'application/xml;q=0.8,*/*;q=0.9',
                       '*/*;q=0.9,application/xml;q=0.8', '*/*',
                       'text/plain,application/xml'):
            req = Request.blank('/sda1/p/a/plainc',
                                environ={'REQUEST_METHOD': 'GET'})
            req.accept = accept
            resp = self.controller.GET(req)
            self.assertEquals(resp.body, plain_body,
                              'Invalid body for Accept: %s' % accept)
            self.assertEquals(resp.content_type, 'text/plain',
                              'Invalid content_type for Accept: %s' % accept)
            resp = self.controller.HEAD(req)
            self.assertEquals(resp.content_type, 'text/plain',
                              'Invalid content_type for Accept: %s' % accept)
        # test conflicting formats: the query parameter wins over Accept.
        req = Request.blank('/sda1/p/a/plainc?format=plain',
                            environ={'REQUEST_METHOD': 'GET'})
        req.accept = 'application/json'
        resp = self.controller.GET(req)
        self.assertEquals(resp.content_type, 'text/plain')
        self.assertEquals(resp.body, plain_body)
        # test unknown format uses default plain
        req = Request.blank('/sda1/p/a/plainc?format=somethingelse',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.content_type, 'text/plain')
        self.assertEquals(resp.body, plain_body)

    def test_GET_json_last_modified(self):
        # make a container
        req = Request.blank('/sda1/p/a/jsonc',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0'})
        resp = self.controller.PUT(req)
        for i, d in [(0, 1.5), (1, 1.0), ]:
            req = Request.blank('/sda1/p/a/jsonc/%s' % i,
                                environ={'REQUEST_METHOD': 'PUT',
                                         'HTTP_X_TIMESTAMP': d,
                                         'HTTP_X_CONTENT_TYPE': 'text/plain',
                                         'HTTP_X_ETAG': 'x',
                                         'HTTP_X_SIZE': 0})
            resp = self.controller.PUT(req)
            self.assertEquals(resp.status_int, 201)
        # test format
        # last_modified format must be uniform, even when there are not msecs
        json_body = [{"name": "0",
                      "hash": "x",
                      "bytes": 0,
                      "content_type": "text/plain",
                      "last_modified": "1970-01-01T00:00:01.500000Z"},
                     {"name": "1",
                      "hash": "x",
                      "bytes": 0,
                      "content_type": "text/plain",
                      "last_modified": "1970-01-01T00:00:01.000000Z"}, ]
        req = Request.blank('/sda1/p/a/jsonc?format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.content_type, 'application/json')
        self.assertEquals(simplejson.loads(resp.body), json_body)
        self.assertEquals(resp.charset, 'utf-8')

    def test_GET_xml(self):
        # make a container
        req = Request.blank('/sda1/p/a/xmlc',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0'})
        resp = self.controller.PUT(req)
        # fill the container
        for i in range(3):
            req = Request.blank('/sda1/p/a/xmlc/%s' % i,
                                environ={'REQUEST_METHOD': 'PUT',
                                         'HTTP_X_TIMESTAMP': '1',
                                         'HTTP_X_CONTENT_TYPE': 'text/plain',
                                         'HTTP_X_ETAG': 'x',
                                         'HTTP_X_SIZE': 0})
            resp = self.controller.PUT(req)
            self.assertEquals(resp.status_int, 201)
        xml_body = '<?xml version="1.0" encoding="UTF-8"?>\n' \
            '<container name="xmlc">' \
            '<object><name>0</name><hash>x</hash><bytes>0</bytes>' \
            '<content_type>text/plain</content_type>' \
            '<last_modified>1970-01-01T00:00:01.000000Z' \
            '</last_modified></object>' \
            '<object><name>1</name><hash>x</hash><bytes>0</bytes>' \
            '<content_type>text/plain</content_type>' \
            '<last_modified>1970-01-01T00:00:01.000000Z' \
            '</last_modified></object>' \
            '<object><name>2</name><hash>x</hash><bytes>0</bytes>' \
            '<content_type>text/plain</content_type>' \
            '<last_modified>1970-01-01T00:00:01.000000Z' \
            '</last_modified></object>' \
            '</container>'
        # tests
        req = Request.blank('/sda1/p/a/xmlc?format=xml',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.content_type, 'application/xml')
        self.assertEquals(resp.body, xml_body)
        self.assertEquals(resp.charset, 'utf-8')
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.content_type, 'application/xml')
        for xml_accept in ('application/xml',
                           'application/xml;q=1.0,*/*;q=0.9',
                           '*/*;q=0.9,application/xml;q=1.0',
                           'application/xml,text/xml'):
            req = Request.blank('/sda1/p/a/xmlc',
                                environ={'REQUEST_METHOD': 'GET'})
            req.accept = xml_accept
            resp = self.controller.GET(req)
            # NOTE(review): this chunk of the file ends here; the remaining
            # assertions of test_GET_xml are outside the visible region.
self.assertEquals(resp.body, xml_body, 'Invalid body for Accept: %s' % xml_accept) self.assertEquals(resp.content_type, 'application/xml', 'Invalid content_type for Accept: %s' % xml_accept) resp = self.controller.HEAD(req) self.assertEquals(resp.content_type, 'application/xml', 'Invalid content_type for Accept: %s' % xml_accept) req = Request.blank('/sda1/p/a/xmlc', environ={'REQUEST_METHOD': 'GET'}) req.accept = 'text/xml' resp = self.controller.GET(req) self.assertEquals(resp.content_type, 'text/xml') self.assertEquals(resp.body, xml_body) def test_GET_marker(self): # make a container req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) resp = self.controller.PUT(req) # fill the container for i in range(3): req = Request.blank('/sda1/p/a/c/%s' % i, environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0}) resp = self.controller.PUT(req) self.assertEquals(resp.status_int, 201) # test limit with marker req = Request.blank('/sda1/p/a/c?limit=2&marker=1', environ={'REQUEST_METHOD': 'GET'}) resp = self.controller.GET(req) result = resp.body.split() self.assertEquals(result, ['2', ]) def test_weird_content_types(self): snowman = u'\u2603' req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) resp = self.controller.PUT(req) for i, ctype in enumerate((snowman.encode('utf-8'), 'text/plain; "utf-8"')): req = Request.blank('/sda1/p/a/c/%s' % i, environ={ 'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': ctype, 'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0}) resp = self.controller.PUT(req) self.assertEquals(resp.status_int, 201) req = Request.blank('/sda1/p/a/c?format=json', environ={'REQUEST_METHOD': 'GET'}) resp = self.controller.GET(req) result = [x['content_type'] for x in simplejson.loads(resp.body)] self.assertEquals(result, [u'\u2603', 'text/plain; "utf-8"']) def test_GET_accept_not_valid(self): 
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) self.controller.PUT(req) req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Put-Timestamp': '1', 'X-Delete-Timestamp': '0', 'X-Object-Count': '0', 'X-Bytes-Used': '0', 'X-Timestamp': normalize_timestamp(0)}) self.controller.PUT(req) req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) req.accept = 'application/xml*' resp = self.controller.GET(req) self.assertEquals(resp.status_int, 406) def test_GET_limit(self): # make a container req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) resp = self.controller.PUT(req) # fill the container for i in range(3): req = Request.blank('/sda1/p/a/c/%s' % i, environ={ 'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0}) resp = self.controller.PUT(req) self.assertEquals(resp.status_int, 201) # test limit req = Request.blank('/sda1/p/a/c?limit=2', environ={'REQUEST_METHOD': 'GET'}) resp = self.controller.GET(req) result = resp.body.split() self.assertEquals(result, ['0', '1']) def test_GET_prefix(self): req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) resp = self.controller.PUT(req) for i in ('a1', 'b1', 'a2', 'b2', 'a3', 'b3'): req = Request.blank('/sda1/p/a/c/%s' % i, environ={ 'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0}) resp = self.controller.PUT(req) self.assertEquals(resp.status_int, 201) req = Request.blank('/sda1/p/a/c?prefix=a', environ={'REQUEST_METHOD': 'GET'}) resp = self.controller.GET(req) self.assertEquals(resp.body.split(), ['a1', 'a2', 'a3']) def test_GET_delimiter_too_long(self): req = Request.blank('/sda1/p/a/c?delimiter=xx', environ={'REQUEST_METHOD': 'GET', 'HTTP_X_TIMESTAMP': '0'}) resp = self.controller.GET(req) 
self.assertEquals(resp.status_int, 412) def test_GET_delimiter(self): req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) resp = self.controller.PUT(req) for i in ('US-TX-A', 'US-TX-B', 'US-OK-A', 'US-OK-B', 'US-UT-A'): req = Request.blank('/sda1/p/a/c/%s' % i, environ={ 'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0}) resp = self.controller.PUT(req) self.assertEquals(resp.status_int, 201) req = Request.blank('/sda1/p/a/c?prefix=US-&delimiter=-&format=json', environ={'REQUEST_METHOD': 'GET'}) resp = self.controller.GET(req) self.assertEquals(simplejson.loads(resp.body), [{"subdir": "US-OK-"}, {"subdir": "US-TX-"}, {"subdir": "US-UT-"}]) def test_GET_delimiter_xml(self): req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) resp = self.controller.PUT(req) for i in ('US-TX-A', 'US-TX-B', 'US-OK-A', 'US-OK-B', 'US-UT-A'): req = Request.blank('/sda1/p/a/c/%s' % i, environ={ 'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0}) resp = self.controller.PUT(req) self.assertEquals(resp.status_int, 201) req = Request.blank('/sda1/p/a/c?prefix=US-&delimiter=-&format=xml', environ={'REQUEST_METHOD': 'GET'}) resp = self.controller.GET(req) self.assertEquals(resp.body, '<?xml version="1.0" encoding="UTF-8"?>' '\n<container name="c"><subdir name="US-OK-"><name>US-OK-</name></subdir>' '<subdir name="US-TX-"><name>US-TX-</name></subdir>' '<subdir name="US-UT-"><name>US-UT-</name></subdir></container>') def test_GET_delimiter_xml_with_quotes(self): req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) resp = self.controller.PUT(req) req = Request.blank('/sda1/p/a/c/<\'sub\' "dir">/object', environ={ 'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x', 
'HTTP_X_SIZE': 0}) resp = self.controller.PUT(req) self.assertEquals(resp.status_int, 201) req = Request.blank('/sda1/p/a/c?delimiter=/&format=xml', environ={'REQUEST_METHOD': 'GET'}) resp = self.controller.GET(req) self.assertEquals( resp.body, '<?xml version="1.0" encoding="UTF-8"?>\n<container name="c">' '<subdir name="&lt;\'sub\' &quot;dir&quot;&gt;/">' '<name>&lt;\'sub\' "dir"&gt;/</name></subdir></container>') def test_GET_path(self): req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) resp = self.controller.PUT(req) for i in ('US/TX', 'US/TX/B', 'US/OK', 'US/OK/B', 'US/UT/A'): req = Request.blank('/sda1/p/a/c/%s' % i, environ={ 'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0}) resp = self.controller.PUT(req) self.assertEquals(resp.status_int, 201) req = Request.blank('/sda1/p/a/c?path=US&format=json', environ={'REQUEST_METHOD': 'GET'}) resp = self.controller.GET(req) self.assertEquals(simplejson.loads(resp.body), [{"name":"US/OK", "hash":"x", "bytes":0, "content_type":"text/plain", "last_modified":"1970-01-01T00:00:01.000000Z"}, {"name":"US/TX", "hash":"x", "bytes":0, "content_type":"text/plain", "last_modified":"1970-01-01T00:00:01.000000Z"}]) def test_GET_insufficient_storage(self): self.controller = container_server.ContainerController( {'devices': self.testdir}) req = Request.blank('/sda-null/p/a/c', environ={'REQUEST_METHOD': 'GET', 'HTTP_X_TIMESTAMP': '1'}) resp = self.controller.GET(req) self.assertEquals(resp.status_int, 507) def test_through_call(self): inbuf = StringIO() errbuf = StringIO() outbuf = StringIO() def start_response(*args): outbuf.writelines(args) self.controller.__call__({'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'PATH_INFO': '/sda1/p/a/c', 'SERVER_NAME': '127.0.0.1', 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.0', 'CONTENT_LENGTH': '0', 'wsgi.version': (1, 0), 'wsgi.url_scheme': 'http', 'wsgi.input': inbuf, 
'wsgi.errors': errbuf, 'wsgi.multithread': False, 'wsgi.multiprocess': False, 'wsgi.run_once': False}, start_response) self.assertEquals(errbuf.getvalue(), '') self.assertEquals(outbuf.getvalue()[:4], '404 ') def test_through_call_invalid_path(self): inbuf = StringIO() errbuf = StringIO() outbuf = StringIO() def start_response(*args): outbuf.writelines(args) self.controller.__call__({'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'PATH_INFO': '/bob', 'SERVER_NAME': '127.0.0.1', 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.0', 'CONTENT_LENGTH': '0', 'wsgi.version': (1, 0), 'wsgi.url_scheme': 'http', 'wsgi.input': inbuf, 'wsgi.errors': errbuf, 'wsgi.multithread': False, 'wsgi.multiprocess': False, 'wsgi.run_once': False}, start_response) self.assertEquals(errbuf.getvalue(), '') self.assertEquals(outbuf.getvalue()[:4], '400 ') def test_through_call_invalid_path_utf8(self): inbuf = StringIO() errbuf = StringIO() outbuf = StringIO() def start_response(*args): outbuf.writelines(args) self.controller.__call__({'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'PATH_INFO': '\x00', 'SERVER_NAME': '127.0.0.1', 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.0', 'CONTENT_LENGTH': '0', 'wsgi.version': (1, 0), 'wsgi.url_scheme': 'http', 'wsgi.input': inbuf, 'wsgi.errors': errbuf, 'wsgi.multithread': False, 'wsgi.multiprocess': False, 'wsgi.run_once': False}, start_response) self.assertEquals(errbuf.getvalue(), '') self.assertEquals(outbuf.getvalue()[:4], '412 ') def test_invalid_method_doesnt_exist(self): errbuf = StringIO() outbuf = StringIO() def start_response(*args): outbuf.writelines(args) self.controller.__call__({'REQUEST_METHOD': 'method_doesnt_exist', 'PATH_INFO': '/sda1/p/a/c'}, start_response) self.assertEquals(errbuf.getvalue(), '') self.assertEquals(outbuf.getvalue()[:4], '405 ') def test_invalid_method_is_not_public(self): errbuf = StringIO() outbuf = StringIO() def start_response(*args): outbuf.writelines(args) self.controller.__call__({'REQUEST_METHOD': 
'__init__', 'PATH_INFO': '/sda1/p/a/c'}, start_response) self.assertEquals(errbuf.getvalue(), '') self.assertEquals(outbuf.getvalue()[:4], '405 ') def test_params_format(self): self.controller.PUT(Request.blank('/sda1/p/a/c', headers={'X-Timestamp': normalize_timestamp(1)}, environ={'REQUEST_METHOD': 'PUT'})) for format in ('xml', 'json'): req = Request.blank('/sda1/p/a/c?format=%s' % format, environ={'REQUEST_METHOD': 'GET'}) resp = self.controller.GET(req) self.assertEquals(resp.status_int, 200) def test_params_utf8(self): # Bad UTF8 sequence, all parameters should cause 400 error for param in ('delimiter', 'limit', 'marker', 'path', 'prefix', 'end_marker', 'format'): req = Request.blank('/sda1/p/a/c?%s=\xce' % param, environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) self.assertEquals(resp.status_int, 400, "%d on param %s" % (resp.status_int, param)) # Good UTF8 sequence for delimiter, too long (1 byte delimiters only) req = Request.blank('/sda1/p/a/c?delimiter=\xce\xa9', environ={'REQUEST_METHOD': 'GET'}) resp = self.controller.GET(req) self.assertEquals(resp.status_int, 412, "%d on param delimiter" % (resp.status_int)) self.controller.PUT(Request.blank('/sda1/p/a/c', headers={'X-Timestamp': normalize_timestamp(1)}, environ={'REQUEST_METHOD': 'PUT'})) # Good UTF8 sequence, ignored for limit, doesn't affect other queries for param in ('limit', 'marker', 'path', 'prefix', 'end_marker', 'format'): req = Request.blank('/sda1/p/a/c?%s=\xce\xa9' % param, environ={'REQUEST_METHOD': 'GET'}) resp = self.controller.GET(req) self.assertEquals(resp.status_int, 204, "%d on param %s" % (resp.status_int, param)) def test_put_auto_create(self): headers = {'x-timestamp': normalize_timestamp(1), 'x-size': '0', 'x-content-type': 'text/plain', 'x-etag': 'd41d8cd98f00b204e9800998ecf8427e'} resp = self.controller.PUT(Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers))) self.assertEquals(resp.status_int, 404) resp = 
self.controller.PUT(Request.blank('/sda1/p/.a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers))) self.assertEquals(resp.status_int, 201) resp = self.controller.PUT(Request.blank('/sda1/p/a/.c/o', environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers))) self.assertEquals(resp.status_int, 404) resp = self.controller.PUT(Request.blank('/sda1/p/a/.c/.o', environ={'REQUEST_METHOD': 'PUT'}, headers=dict(headers))) self.assertEquals(resp.status_int, 404) def test_delete_auto_create(self): headers = {'x-timestamp': normalize_timestamp(1)} resp = self.controller.DELETE(Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}, headers=dict(headers))) self.assertEquals(resp.status_int, 404) resp = self.controller.DELETE(Request.blank('/sda1/p/.a/c/o', environ={'REQUEST_METHOD': 'DELETE'}, headers=dict(headers))) self.assertEquals(resp.status_int, 204) resp = self.controller.DELETE(Request.blank('/sda1/p/a/.c/o', environ={'REQUEST_METHOD': 'DELETE'}, headers=dict(headers))) self.assertEquals(resp.status_int, 404) resp = self.controller.DELETE(Request.blank('/sda1/p/a/.c/.o', environ={'REQUEST_METHOD': 'DELETE'}, headers=dict(headers))) self.assertEquals(resp.status_int, 404) def test_content_type_on_HEAD(self): self.controller.PUT(Request.blank('/sda1/p/a/o', headers={'X-Timestamp': normalize_timestamp(1)}, environ={'REQUEST_METHOD': 'PUT'})) env = {'REQUEST_METHOD': 'HEAD'} req = Request.blank('/sda1/p/a/o?format=xml', environ=env) resp = self.controller.HEAD(req) self.assertEquals(resp.content_type, 'application/xml') self.assertEquals(resp.charset, 'utf-8') req = Request.blank('/sda1/p/a/o?format=json', environ=env) resp = self.controller.HEAD(req) self.assertEquals(resp.content_type, 'application/json') self.assertEquals(resp.charset, 'utf-8') req = Request.blank('/sda1/p/a/o', environ=env) resp = self.controller.HEAD(req) self.assertEquals(resp.content_type, 'text/plain') self.assertEquals(resp.charset, 'utf-8') req = Request.blank( 
'/sda1/p/a/o', headers={'Accept': 'application/json'}, environ=env) resp = self.controller.HEAD(req) self.assertEquals(resp.content_type, 'application/json') self.assertEquals(resp.charset, 'utf-8') req = Request.blank( '/sda1/p/a/o', headers={'Accept': 'application/xml'}, environ=env) resp = self.controller.HEAD(req) self.assertEquals(resp.content_type, 'application/xml') self.assertEquals(resp.charset, 'utf-8') def test_updating_multiple_container_servers(self): http_connect_args = [] def fake_http_connect(ipaddr, port, device, partition, method, path, headers=None, query_string=None, ssl=False): class SuccessfulFakeConn(object): @property def status(self): return 200 def getresponse(self): return self def read(self): return '' captured_args = {'ipaddr': ipaddr, 'port': port, 'device': device, 'partition': partition, 'method': method, 'path': path, 'ssl': ssl, 'headers': headers, 'query_string': query_string} http_connect_args.append( dict((k, v) for k, v in captured_args.iteritems() if v is not None)) req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '12345', 'X-Account-Partition': '30', 'X-Account-Host': '1.2.3.4:5, 6.7.8.9:10', 'X-Account-Device': 'sdb1, sdf1'}) orig_http_connect = container_server.http_connect try: container_server.http_connect = fake_http_connect self.controller.PUT(req) finally: container_server.http_connect = orig_http_connect http_connect_args.sort(key=operator.itemgetter('ipaddr')) self.assertEquals(len(http_connect_args), 2) self.assertEquals( http_connect_args[0], {'ipaddr': '1.2.3.4', 'port': '5', 'path': '/a/c', 'device': 'sdb1', 'partition': '30', 'method': 'PUT', 'ssl': False, 'headers': HeaderKeyDict({'x-bytes-used': 0, 'x-delete-timestamp': '0', 'x-object-count': 0, 'x-put-timestamp': '0000012345.00000', 'referer': 'PUT http://localhost/sda1/p/a/c', 'user-agent': 'container-server %d' % os.getpid(), 'x-trans-id': '-'})}) self.assertEquals( http_connect_args[1], {'ipaddr': '6.7.8.9', 
'port': '10', 'path': '/a/c', 'device': 'sdf1', 'partition': '30', 'method': 'PUT', 'ssl': False, 'headers': HeaderKeyDict({'x-bytes-used': 0, 'x-delete-timestamp': '0', 'x-object-count': 0, 'x-put-timestamp': '0000012345.00000', 'referer': 'PUT http://localhost/sda1/p/a/c', 'user-agent': 'container-server %d' % os.getpid(), 'x-trans-id': '-'})}) def test_serv_reserv(self): """ Test replication_server flag was set from configuration file. """ container_controller = container_server.ContainerController conf = {'devices': self.testdir, 'mount_check': 'false'} self.assertEquals(container_controller(conf).replication_server, None) for val in [True, '1', 'True', 'true']: conf['replication_server'] = val self.assertTrue(container_controller(conf).replication_server) for val in [False, 0, '0', 'False', 'false', 'test_string']: conf['replication_server'] = val self.assertFalse(container_controller(conf).replication_server) def test_list_allowed_methods(self): """ Test list of allowed_methods """ obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST'] repl_methods = ['REPLICATE'] for method_name in obj_methods: method = getattr(self.controller, method_name) self.assertFalse(hasattr(method, 'replication')) for method_name in repl_methods: method = getattr(self.controller, method_name) self.assertEquals(method.replication, True) def test_correct_allowed_method(self): """ Test correct work for allowed method using swift.container_server.ContainerController.__call__ """ inbuf = StringIO() errbuf = StringIO() outbuf = StringIO() self.controller = container_server.ContainerController( {'devices': self.testdir, 'mount_check': 'false', 'replication_server': 'false'}) def start_response(*args): """ Sends args to outbuf """ outbuf.writelines(args) method = 'PUT' env = {'REQUEST_METHOD': method, 'SCRIPT_NAME': '', 'PATH_INFO': '/sda1/p/a/c', 'SERVER_NAME': '127.0.0.1', 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.0', 'CONTENT_LENGTH': '0', 'wsgi.version': (1, 0), 
'wsgi.url_scheme': 'http', 'wsgi.input': inbuf, 'wsgi.errors': errbuf, 'wsgi.multithread': False, 'wsgi.multiprocess': False, 'wsgi.run_once': False} method_res = mock.MagicMock() mock_method = public(lambda x: mock.MagicMock(return_value=method_res)) with mock.patch.object(self.controller, method, new=mock_method): response = self.controller.__call__(env, start_response) self.assertEqual(response, method_res) def test_not_allowed_method(self): """ Test correct work for NOT allowed method using swift.container_server.ContainerController.__call__ """ inbuf = StringIO() errbuf = StringIO() outbuf = StringIO() self.controller = container_server.ContainerController( {'devices': self.testdir, 'mount_check': 'false', 'replication_server': 'false'}) def start_response(*args): """ Sends args to outbuf """ outbuf.writelines(args) method = 'PUT' env = {'REQUEST_METHOD': method, 'SCRIPT_NAME': '', 'PATH_INFO': '/sda1/p/a/c', 'SERVER_NAME': '127.0.0.1', 'SERVER_PORT': '8080', 'SERVER_PROTOCOL': 'HTTP/1.0', 'CONTENT_LENGTH': '0', 'wsgi.version': (1, 0), 'wsgi.url_scheme': 'http', 'wsgi.input': inbuf, 'wsgi.errors': errbuf, 'wsgi.multithread': False, 'wsgi.multiprocess': False, 'wsgi.run_once': False} answer = ['<html><h1>Method Not Allowed</h1><p>The method is not ' 'allowed for this resource.</p></html>'] mock_method = replication(public(lambda x: mock.MagicMock())) with mock.patch.object(self.controller, method, new=mock_method): response = self.controller.__call__(env, start_response) self.assertEqual(response, answer) if __name__ == '__main__': unittest.main()
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Pack distribution folders into a single file.

"""

import os
import shutil
import struct
import subprocess
import sys
from contextlib import contextmanager

from nuitka import Options, OutputDirectories
from nuitka.build import SconsInterface
from nuitka.Options import assumeYesForDownloads, getIconPaths
from nuitka.OutputDirectories import getResultBasepath, getResultFullpath
from nuitka.plugins.Plugins import Plugins
from nuitka.PostProcessing import (
    executePostProcessingResources,
    version_resources,
)
from nuitka.Progress import (
    closeProgressBar,
    reportProgressBar,
    setupProgressBar,
)
from nuitka.PythonVersions import python_version
from nuitka.Tracing import onefile_logger, postprocessing_logger
from nuitka.utils.Download import getCachedDownload
from nuitka.utils.Execution import getNullInput, withEnvironmentVarsOverriden
from nuitka.utils.FileOperations import (
    addFileExecutablePermission,
    deleteFile,
    getFileContents,
    getFileList,
    removeDirectory,
)
from nuitka.utils.SharedLibraries import locateDLL
from nuitka.utils.Utils import (
    getArchitecture,
    getOS,
    hasOnefileSupportedOS,
    isWin32Windows,
)


def packDistFolderToOnefile(dist_dir, binary_filename):
    """Pack distribution to onefile, i.e. a single file that is directly executable."""

    onefile_output_filename = getResultFullpath(onefile=True)

    # Windows and explicit temp-dir mode use the compiled bootstrap binary,
    # Linux defaults to AppImage.
    if getOS() == "Windows" or Options.isOnefileTempDirMode():
        packDistFolderToOnefileBootstrap(onefile_output_filename, dist_dir)
    elif getOS() == "Linux":
        packDistFolderToOnefileLinux(onefile_output_filename, dist_dir, binary_filename)
    else:
        postprocessing_logger.sysexit(
            "Onefile mode is not yet available on %r." % getOS()
        )

    # Let plugins post-process (e.g. sign) the produced file.
    Plugins.onOnefileFinished(onefile_output_filename)


def _getAppImageToolPath(for_operation, assume_yes_for_downloads):
    """Return the path of appimagetool (for Linux).

    Will prompt the user to download if not already cached in AppData
    directory for Nuitka.
    """
    arch_name = getArchitecture()

    # Mismatch between Debian arch name and appimage arch naming.
    if arch_name == "armv7l":
        arch_name = "armhf"

    appimagetool_url = (
        "https://github.com/AppImage/AppImageKit/releases/download/12/appimagetool-%s.AppImage"
        % arch_name
    )

    return getCachedDownload(
        url=appimagetool_url,
        is_arch_specific=True,
        binary=appimagetool_url.rsplit("/", 1)[1],
        flatten=True,
        specifity=appimagetool_url.rsplit("/", 2)[1],
        message="""\
Nuitka will make use of AppImage (https://appimage.org/) tool
to combine Nuitka dist folder to onefile binary.""",
        reject="Nuitka does not work in --onefile on Linux without."
        if for_operation
        else None,
        assume_yes_for_downloads=assume_yes_for_downloads,
    )


def packDistFolderToOnefileLinux(onefile_output_filename, dist_dir, binary_filename):
    """Pack to onefile binary on Linux.

    Notes: This is mostly a wrapper around AppImage, which does all the heavy
    lifting.

    Args:
        onefile_output_filename: path of the onefile binary to produce
        dist_dir: the standalone dist folder to pack
        binary_filename: the compiled binary inside dist_dir
    """

    if not locateDLL("fuse"):
        postprocessing_logger.sysexit(
            """\
Error, the fuse library (libfuse.so.x from fuse2, *not* fuse3) must be installed
for onefile creation to work on Linux."""
        )

    # This might be possible to avoid being done with --runtime-file.
    apprun_filename = os.path.join(dist_dir, "AppRun")
    with open(apprun_filename, "w") as output_file:
        output_file.write(
            """\
#!/bin/bash
exec -a $ARGV0 $APPDIR/%s \"$@\""""
            % os.path.basename(binary_filename)
        )

    addFileExecutablePermission(apprun_filename)

    binary_basename = os.path.basename(getResultBasepath())

    # AppImage requires an icon; Options guarantees at least a default one.
    icon_paths = getIconPaths()

    assert icon_paths
    extension = os.path.splitext(icon_paths[0])[1].lower()

    shutil.copyfile(icon_paths[0], getResultBasepath() + extension)

    with open(getResultBasepath() + ".desktop", "w") as output_file:
        output_file.write(
            """\
[Desktop Entry]
Name=%(binary_basename)s
Exec=%(binary_filename)s
Icon=%(binary_basename)s
Type=Application
Categories=Utility;"""
            % {
                "binary_basename": binary_basename,
                "binary_filename": os.path.basename(binary_filename),
            }
        )

    postprocessing_logger.info(
        "Creating single file from dist folder, this may take a while."
    )

    stdout_filename = binary_filename + ".appimage.stdout.txt"
    stderr_filename = binary_filename + ".appimage.stderr.txt"

    # Fix: use context managers so the log file handles cannot leak if the
    # tool download or Popen raises; previously these were closed manually
    # with no try/finally.
    with open(stdout_filename, "wb") as stdout_file:
        with open(stderr_filename, "wb") as stderr_file:
            # Starting the process while locked, so file handles are not
            # duplicated.
            appimagetool_process = subprocess.Popen(
                (
                    _getAppImageToolPath(
                        for_operation=True,
                        assume_yes_for_downloads=assumeYesForDownloads(),
                    ),
                    dist_dir,
                    "--comp",
                    "xz",
                    "-n",
                    onefile_output_filename,
                ),
                shell=False,
                stdin=getNullInput(),
                stdout=stdout_file,
                stderr=stderr_file,
            )

            result = appimagetool_process.wait()

    if result != 0:
        # Useless result if there were errors, so now remove it.
        deleteFile(onefile_output_filename, must_exist=False)

        stderr = getFileContents(stderr_filename, mode="rb")

        if b"Text file busy" in stderr:
            postprocessing_logger.sysexit(
                "Error, error exit from AppImage because target file is locked."
            )

        if b"modprobe fuse" in stderr:
            postprocessing_logger.sysexit(
                "Error, error exit from AppImage because fuse kernel module was not loaded."
            )

        postprocessing_logger.sysexit(
            "Error, error exit from AppImage, check its outputs '%s' and '%s'."
            % (stdout_filename, stderr_filename)
        )

    if not os.path.exists(onefile_output_filename):
        postprocessing_logger.sysexit(
            "Error, expected output file %r not created by AppImage, check its outputs '%s' and '%s'."
            % (onefile_output_filename, stdout_filename, stderr_filename)
        )

    # Only remove the logs on success, they are the diagnostics otherwise.
    deleteFile(stdout_filename, must_exist=True)
    deleteFile(stderr_filename, must_exist=True)

    postprocessing_logger.info("Completed onefile creation.")


def _runOnefileScons(quiet, onefile_compression):
    """Build the onefile bootstrap binary via Scons.

    Args:
        quiet: suppress Scons output
        onefile_compression: whether the payload will be zstd compressed
    """

    source_dir = OutputDirectories.getSourceDirectoryPath(onefile=True)
    SconsInterface.cleanSconsDirectory(source_dir)

    asBoolStr = SconsInterface.asBoolStr

    options = {
        "result_name": OutputDirectories.getResultBasepath(onefile=True),
        "result_exe": OutputDirectories.getResultFullpath(onefile=True),
        "source_dir": source_dir,
        "debug_mode": asBoolStr(Options.is_debug),
        "unstripped_mode": asBoolStr(Options.isUnstripped()),
        "experimental": ",".join(Options.getExperimentalIndications()),
        "trace_mode": asBoolStr(Options.shallTraceExecution()),
        "target_arch": getArchitecture(),
        "python_prefix": sys.prefix,
        "nuitka_src": SconsInterface.getSconsDataPath(),
        "compiled_exe": OutputDirectories.getResultFullpath(onefile=False),
        "onefile_compression": asBoolStr(onefile_compression),
        "onefile_splash_screen": asBoolStr(
            Options.getWindowsSplashScreen() is not None
        ),
    }

    if Options.isClang():
        options["clang_mode"] = "true"

    SconsInterface.setCommonOptions(options)

    # The bootstrap binary learns its unpacking behavior from these
    # environment variables at build time.
    onefile_env_values = {}

    if Options.isOnefileTempDirMode():
        onefile_env_values["ONEFILE_TEMP_SPEC"] = Options.getOnefileTempDirSpec(
            use_default=True
        )
    else:
        # Merge version information if possible, to avoid collisions, or deep nesting
        # in file system.
        product_version = version_resources["ProductVersion"]
        file_version = version_resources["FileVersion"]

        if product_version != file_version:
            effective_version = "%s-%s" % (product_version, file_version)
        else:
            effective_version = file_version

        onefile_env_values["ONEFILE_COMPANY"] = version_resources["CompanyName"]
        onefile_env_values["ONEFILE_PRODUCT"] = version_resources["ProductName"]
        onefile_env_values["ONEFILE_VERSION"] = effective_version

    with withEnvironmentVarsOverriden(onefile_env_values):
        result = SconsInterface.runScons(
            options=options, quiet=quiet, scons_filename="Onefile.scons"
        )

    # Exit if compilation failed.
    if not result:
        onefile_logger.sysexit("Error, onefile bootstrap binary build failed.")

    if Options.isRemoveBuildDir():
        onefile_logger.info("Removing onefile build directory %r." % source_dir)

        removeDirectory(path=source_dir, ignore_errors=False)
        assert not os.path.exists(source_dir)
    else:
        onefile_logger.info("Keeping onefile build directory %r." % source_dir)


def _pickCompressor():
    """Pick payload compression.

    Returns a tuple (indicator, factory) where indicator is the single byte
    written into the payload header (b"Y" compressed, b"X" plain) and factory
    is a context manager wrapping the output file handle.
    """
    try:
        from zstandard import ZstdCompressor  # pylint: disable=I0021,import-error
    except ImportError:
        if python_version < 0x350:
            onefile_logger.info(
                "Onefile compression is not supported before Python 3.5 at this time."
            )
        else:
            onefile_logger.warning(
                "Onefile mode cannot compress without 'zstandard' module installed."
            )
    else:
        cctx = ZstdCompressor(level=22)

        @contextmanager
        def useCompressedFile(output_file):
            # closefd=False: the payload writer still needs the underlying
            # file after the compression stream is finished.
            with cctx.stream_writer(output_file, closefd=False) as compressed_file:
                yield compressed_file

        onefile_logger.info("Using compression for onefile payload.")

        return b"Y", useCompressedFile

    # By default use the same file handle that does not compress.
    @contextmanager
    def useSameFile(output_file):
        yield output_file

    return b"X", useSameFile


def packDistFolderToOnefileBootstrap(onefile_output_filename, dist_dir):
    """Append the dist folder as a payload to the compiled bootstrap binary.

    Payload layout after the bootstrap binary: magic "KA" + compression
    indicator, then per file a NUL terminated (encoded) relative filename,
    an 8 byte size, and the file contents; an empty filename terminates the
    list.  The final 8 bytes hold the payload start offset.
    """
    # Dealing with details, pylint: disable=too-many-locals

    postprocessing_logger.info(
        "Creating single file from dist folder, this may take a while."
    )

    onefile_logger.info("Running bootstrap binary compilation via Scons.")

    # Now need to append to payload it, potentially compressing it.
    compression_indicator, compressor = _pickCompressor()

    # First need to create the bootstrap binary for unpacking.
    _runOnefileScons(
        quiet=not Options.isShowScons(),
        onefile_compression=compression_indicator == b"Y",
    )

    if isWin32Windows():
        executePostProcessingResources(manifest=None, onefile=True)

    with open(onefile_output_filename, "ab") as output_file:
        # Seeking to end of file seems necessary on Python2 at least, maybe it's
        # just that tell reports wrong value initially.
        output_file.seek(0, 2)
        start_pos = output_file.tell()

        output_file.write(b"KA" + compression_indicator)

        # Move the binary to start immediately to the start position
        start_binary = getResultFullpath(onefile=False)
        file_list = getFileList(dist_dir, normalize=False)
        file_list.remove(start_binary)
        file_list.insert(0, start_binary)

        if isWin32Windows():
            filename_encoding = "utf-16le"
        else:
            filename_encoding = "utf8"

        payload_size = 0

        setupProgressBar(
            stage="Onefile Payload",
            unit="module",
            total=len(file_list),
        )

        with compressor(output_file) as compressed_file:
            for filename_full in file_list:
                filename_relative = os.path.relpath(filename_full, dist_dir)

                reportProgressBar(
                    item=filename_relative,
                    update=False,
                )

                filename_encoded = (filename_relative + "\0").encode(filename_encoding)

                compressed_file.write(filename_encoded)
                payload_size += len(filename_encoded)

                with open(filename_full, "rb") as input_file:
                    input_file.seek(0, 2)
                    input_size = input_file.tell()
                    input_file.seek(0, 0)

                    compressed_file.write(struct.pack("Q", input_size))
                    shutil.copyfileobj(input_file, compressed_file)

                    payload_size += input_size + 8

                reportProgressBar(
                    item=filename_relative,
                    update=True,
                )

            # Using empty filename as a terminator.
            filename_encoded = "\0".encode(filename_encoding)
            compressed_file.write(filename_encoded)
            payload_size += len(filename_encoded)

            compressed_size = compressed_file.tell()

        if compression_indicator == b"Y":
            onefile_logger.info(
                "Onefile payload compression ratio (%.2f%%) size %d to %d."
                % (
                    (float(compressed_size) / payload_size) * 100,
                    payload_size,
                    compressed_size,
                )
            )

        # add padding to have the start position at a double world boundary
        # this is needed on windows so that a possible certificate immediately
        # follows the start position
        pad = output_file.tell() % 8
        if pad != 0:
            # Bug fix: was "bytes(8 - pad)", which is N zero bytes only on
            # Python3; under Python2 "bytes" is "str", so it wrote the ASCII
            # digits of the number (always one byte) and broke the alignment.
            # An explicit NUL run behaves identically on both versions.
            output_file.write(b"\0" * (8 - pad))

        output_file.write(struct.pack("Q", start_pos))

    closeProgressBar()


def checkOnefileReadiness(assume_yes_for_downloads):
    """Check if onefile mode can work on this OS, downloading tools if needed."""
    if getOS() == "Linux":
        app_image_path = _getAppImageToolPath(
            for_operation=False, assume_yes_for_downloads=assume_yes_for_downloads
        )

        return app_image_path is not None
    else:
        return hasOnefileSupportedOS()
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.

DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""

# NOTE(review): this is a Python 2-only module — it relies on
# __metaclass__, basestring, xrange, sys.maxint and iterkeys/itervalues,
# none of which exist in Python 3.

import abc
# Bind the abc machinery to module-level names so the class bodies below
# can use them without qualifying through the module.
ABCMeta = abc.ABCMeta
abstractmethod = abc.abstractmethod
import sys

__all__ = ["Hashable", "Iterable", "Iterator",
           "Sized", "Container", "Callable",
           "Set", "MutableSet",
           "Mapping", "MutableMapping",
           "MappingView", #"KeysView", "ItemsView", "ValuesView",
           "Sequence", "MutableSequence",
           ]

### ONE-TRICK PONIES ###

def _hasattr(C, attr):
    """Return True if *attr* is defined in C's own __dict__ or that of any
    base class, i.e. is really implemented rather than merely reachable via
    __getattr__.  Falls back to plain hasattr() for old-style classes,
    which have no __mro__.
    """
    try:
        return any(attr in B.__dict__ for B in C.__mro__)
    except AttributeError: # Old-style class
        return hasattr(C, attr)


class Hashable(object):
    __metaclass__ = ABCMeta

    @abstractmethod
    def __hash__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        # Walk the MRO and honour the FIRST class that sets __hash__:
        # a truthy value means hashable; a false value (e.g. __hash__ = None,
        # as Set below uses) means deliberately unhashable, hence the break.
        if cls is Hashable:
            try:
                for B in C.__mro__:
                    if "__hash__" in B.__dict__:
                        if B.__dict__["__hash__"]:
                            return True
                        break
            except AttributeError: # Old-style class
                if getattr(C, "__hash__", None):
                    return True
        return NotImplemented


class Iterable(object):
    __metaclass__ = ABCMeta

    @abstractmethod
    def __iter__(self):
        # Abstract body: a generator that yields nothing.
        while False:
            yield None

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterable:
            if _hasattr(C, "__iter__"):
                return True
        return NotImplemented

# Python 2 str is iterable via __getitem__ but defines no __iter__, so the
# duck-typing hook above would miss it; register it explicitly.
Iterable.register(str)


class Iterator(Iterable):

    @abstractmethod
    def next(self):
        'Return the next item from the iterator. When exhausted, raise StopIteration'
        raise StopIteration

    def __iter__(self):
        return self

    @classmethod
    def __subclasshook__(cls, C):
        # Python 2 iterator protocol: next() plus __iter__().
        if cls is Iterator:
            if _hasattr(C, "next") and _hasattr(C, "__iter__"):
                return True
        return NotImplemented


class Sized(object):
    __metaclass__ = ABCMeta

    @abstractmethod
    def __len__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Sized:
            if _hasattr(C, "__len__"):
                return True
        return NotImplemented


class Container(object):
    __metaclass__ = ABCMeta

    @abstractmethod
    def __contains__(self, x):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Container:
            if _hasattr(C, "__contains__"):
                return True
        return NotImplemented


class Callable(object):
    __metaclass__ = ABCMeta

    @abstractmethod
    def __call__(self, *args, **kwds):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Callable:
            if _hasattr(C, "__call__"):
                return True
        return NotImplemented


### SETS ###


class Set(Sized, Iterable, Container):
    """A set is a finite, iterable container.

    This class provides concrete generic implementations of all
    methods except for __contains__, __iter__ and __len__.

    To override the comparisons (presumably for speed, as the
    semantics are fixed), redefine __le__ and __ge__,
    then the other operations will automatically follow suit.
    """

    def __le__(self, other):
        # Subset test: every element of self must be in other.
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) > len(other):
            return False
        for elem in self:
            if elem not in other:
                return False
        return True

    def __lt__(self, other):
        # Proper subset: strictly smaller AND a subset.
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) < len(other) and self.__le__(other)

    def __gt__(self, other):
        # Proper superset: strictly larger AND a superset.
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) > len(other) and self.__ge__(other)

    def __ge__(self, other):
        # Superset test: every element of other must be in self.
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) < len(other):
            return False
        for elem in other:
            if elem not in self:
                return False
        return True

    def __eq__(self, other):
        # Equal lengths plus subset implies identical membership.
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) == len(other) and self.__le__(other)

    def __ne__(self, other):
        return not (self == other)

    @classmethod
    def _from_iterable(cls, it):
        '''Construct an instance of the class from any iterable input.

        Must override this method if the class constructor signature
        does not accept an iterable for an input.
        '''
        return cls(it)

    def __and__(self, other):
        # Intersection; accepts any iterable on the right-hand side.
        if not isinstance(other, Iterable):
            return NotImplemented
        return self._from_iterable(value for value in other if value in self)

    __rand__ = __and__

    def isdisjoint(self, other):
        'Return True if two sets have a null intersection.'
        for value in other:
            if value in self:
                return False
        return True

    def __or__(self, other):
        # Union; accepts any iterable on the right-hand side.
        if not isinstance(other, Iterable):
            return NotImplemented
        chain = (e for s in (self, other) for e in s)
        return self._from_iterable(chain)

    __ror__ = __or__

    def __sub__(self, other):
        # Difference; a non-Set iterable operand is first converted so that
        # membership tests against it are well defined.
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return self._from_iterable(value for value in self
                                   if value not in other)

    def __rsub__(self, other):
        # Reflected difference: other - self, with the same coercion.
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return self._from_iterable(value for value in other
                                   if value not in self)

    def __xor__(self, other):
        # Symmetric difference, expressed via the operators defined above.
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return (self - other) | (other - self)

    __rxor__ = __xor__

    # Sets are not hashable by default, but subclasses can change this
    __hash__ = None

    def _hash(self):
        """Compute the hash value of a set.

        Note that we don't define __hash__: not all sets are hashable.
        But if you define a hashable set type, its __hash__ should
        call this function.

        This must be compatible __eq__.

        All sets ought to compare equal if they contain the same
        elements, regardless of how they are implemented, and
        regardless of the order of the elements; so there's not much
        freedom for __eq__ or __hash__.  We match the algorithm used
        by the built-in frozenset type.
        """
        # MASK keeps the accumulator within the platform word size; XOR-ing
        # per element makes the result independent of iteration order.
        MAX = sys.maxint
        MASK = 2 * MAX + 1
        n = len(self)
        h = 1927868237 * (n + 1)
        h &= MASK
        for x in self:
            hx = hash(x)
            h ^= (hx ^ (hx << 16) ^ 89869747)  * 3644798167
            h &= MASK
        h = h * 69069 + 907133923
        h &= MASK
        if h > MAX:
            # Fold back into the signed range.
            h -= MASK + 1
        if h == -1:
            # -1 is reserved (CPython uses it to signal errors from tp_hash).
            h = 590923713
        return h

Set.register(frozenset)


class MutableSet(Set):
    """A mutable set is a finite, iterable container.

    This class provides concrete generic implementations of all
    methods except for __contains__, __iter__, __len__,
    add(), and discard().

    To override the comparisons (presumably for speed, as the
    semantics are fixed), all you have to do is redefine __le__ and
    then the other operations will automatically follow suit.
    """

    @abstractmethod
    def add(self, value):
        """Add an element."""
        raise NotImplementedError

    @abstractmethod
    def discard(self, value):
        """Remove an element.  Do not raise an exception if absent."""
        raise NotImplementedError

    def remove(self, value):
        """Remove an element. If not a member, raise a KeyError."""
        if value not in self:
            raise KeyError(value)
        self.discard(value)

    def pop(self):
        """Return the popped value.  Raise KeyError if empty."""
        it = iter(self)
        try:
            value = next(it)
        except StopIteration:
            raise KeyError
        self.discard(value)
        return value

    def clear(self):
        """This is slow (creates N new iterators!) but effective."""
        try:
            while True:
                self.pop()
        except KeyError:
            pass

    def __ior__(self, it):
        for value in it:
            self.add(value)
        return self

    def __iand__(self, it):
        # Iterate over a fresh difference set so we never mutate self while
        # iterating it.
        for value in (self - it):
            self.discard(value)
        return self

    def __ixor__(self, it):
        # s ^= s must empty the set; iterating it while mutating would break.
        if it is self:
            self.clear()
        else:
            if not isinstance(it, Set):
                it = self._from_iterable(it)
            for value in it:
                if value in self:
                    self.discard(value)
                else:
                    self.add(value)
        return self

    def __isub__(self, it):
        # Same self-aliasing guard as __ixor__ above.
        if it is self:
            self.clear()
        else:
            for value in it:
                self.discard(value)
        return self

MutableSet.register(set)


### MAPPINGS ###


class Mapping(Sized, Iterable, Container):

    """A Mapping is a generic container for associating key/value
    pairs.

    This class provides concrete generic implementations of all
    methods except for __getitem__, __iter__, and __len__.

    """

    @abstractmethod
    def __getitem__(self, key):
        raise KeyError

    def get(self, key, default=None):
        'D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.'
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        # EAFP: a failed lookup, not a pre-check, decides membership.
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True

    def iterkeys(self):
        'D.iterkeys() -> an iterator over the keys of D'
        return iter(self)

    def itervalues(self):
        'D.itervalues() -> an iterator over the values of D'
        for key in self:
            yield self[key]

    def iteritems(self):
        'D.iteritems() -> an iterator over the (key, value) items of D'
        for key in self:
            yield (key, self[key])

    def keys(self):
        "D.keys() -> list of D's keys"
        return list(self)

    def items(self):
        "D.items() -> list of D's (key, value) pairs, as 2-tuples"
        return [(key, self[key]) for key in self]

    def values(self):
        "D.values() -> list of D's values"
        return [self[key] for key in self]

    # Mappings are not hashable by default, but subclasses can change this
    __hash__ = None

    def __eq__(self, other):
        # Compare as dicts so ordering and implementation are irrelevant.
        if not isinstance(other, Mapping):
            return NotImplemented
        return dict(self.items()) == dict(other.items())

    def __ne__(self, other):
        return not (self == other)


class MappingView(Sized):
    # Base for the (here disabled) KeysView/ItemsView/ValuesView: a thin
    # wrapper holding a reference to the underlying mapping.

    def __init__(self, mapping):
        self._mapping = mapping

    def __len__(self):
        return len(self._mapping)

    def __repr__(self):
        return '{0.__class__.__name__}({0._mapping!r})'.format(self)


#class KeysView(MappingView, Set):
#
#    @classmethod
#    def _from_iterable(self, it):
#        return set(it)
#
#    def __contains__(self, key):
#        return key in self._mapping
#
#    def __iter__(self):
#        for key in self._mapping:
#            yield key
#
#KeysView.register(type({}.viewkeys()))
#
#class ItemsView(MappingView, Set):
#
#    @classmethod
#    def _from_iterable(self, it):
#        return set(it)
#
#    def __contains__(self, item):
#        key, value = item
#        try:
#            v = self._mapping[key]
#        except KeyError:
#            return False
#        else:
#            return v == value
#
#    def __iter__(self):
#        for key in self._mapping:
#            yield (key, self._mapping[key])
#
#ItemsView.register(type({}.viewitems()))
#
#class ValuesView(MappingView):
#
#    def __contains__(self, value):
#        for key in self._mapping:
#            if value == self._mapping[key]:
#                return True
#        return False
#
#    def __iter__(self):
#        for key in self._mapping:
#            yield self._mapping[key]
#
#ValuesView.register(type({}.viewvalues()))


class MutableMapping(Mapping):

    """A MutableMapping is a generic container for associating
    key/value pairs.

    This class provides concrete generic implementations of all
    methods except for __getitem__, __setitem__, __delitem__,
    __iter__, and __len__.

    """

    @abstractmethod
    def __setitem__(self, key, value):
        raise KeyError

    @abstractmethod
    def __delitem__(self, key):
        raise KeyError

    # Sentinel distinguishing "no default supplied" from default=None.
    __marker = object()

    def pop(self, key, default=__marker):
        '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
          If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value

    def popitem(self):
        '''D.popitem() -> (k, v), remove and return some (key, value) pair
           as a 2-tuple; but raise KeyError if D is empty.
        '''
        try:
            key = next(iter(self))
        except StopIteration:
            raise KeyError
        value = self[key]
        del self[key]
        return key, value

    def clear(self):
        'D.clear() -> None.  Remove all items from D.'
        try:
            while True:
                self.popitem()
        except KeyError:
            pass

    def update(*args, **kwds):
        ''' D.update([E, ]**F) -> None.  Update D from mapping/iterable E and F.
        If E present and has a .keys() method, does:     for k in E: D[k] = E[k]
        If E present and lacks .keys() method, does:     for (k, v) in E: D[k] = v
        In either case, this is followed by: for k, v in F.items(): D[k] = v
        '''
        # 'self' is peeled off *args by hand so that a key literally named
        # "self" can still be passed through **kwds.
        if not args:
            raise TypeError("descriptor 'update' of 'MutableMapping' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('update expected at most 1 arguments, got %d' %
                            len(args))
        if args:
            other = args[0]
            if isinstance(other, Mapping):
                for key in other:
                    self[key] = other[key]
            elif hasattr(other, "keys"):
                for key in other.keys():
                    self[key] = other[key]
            else:
                for key, value in other:
                    self[key] = value
        for key, value in kwds.items():
            self[key] = value

    def setdefault(self, key, default=None):
        'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default

MutableMapping.register(dict)


### SEQUENCES ###


class Sequence(Sized, Iterable, Container):

    """All the operations on a read-only sequence.

    Concrete subclasses must override __new__ or __init__,
    __getitem__, and __len__.
    """

    @abstractmethod
    def __getitem__(self, index):
        raise IndexError

    def __iter__(self):
        # Index upward until __getitem__ signals the end with IndexError.
        i = 0
        try:
            while True:
                v = self[i]
                yield v
                i += 1
        except IndexError:
            return

    def __contains__(self, value):
        for v in self:
            if v == value:
                return True
        return False

    def __reversed__(self):
        for i in reversed(range(len(self))):
            yield self[i]

    def index(self, value):
        '''S.index(value) -> integer -- return first index of value.
           Raises ValueError if the value is not present.
        '''
        for i, v in enumerate(self):
            if v == value:
                return i
        raise ValueError

    def count(self, value):
        'S.count(value) -> integer -- return number of occurrences of value'
        return sum(1 for v in self if v == value)

Sequence.register(tuple)
Sequence.register(basestring)
#Sequence.register(buffer)
Sequence.register(xrange)


class MutableSequence(Sequence):

    """All the operations on a read-only sequence.

    Concrete subclasses must provide __new__ or __init__,
    __getitem__, __setitem__, __delitem__, __len__, and insert().

    """

    @abstractmethod
    def __setitem__(self, index, value):
        raise IndexError

    @abstractmethod
    def __delitem__(self, index):
        raise IndexError

    @abstractmethod
    def insert(self, index, value):
        'S.insert(index, object) -- insert object before index'
        raise IndexError

    def append(self, value):
        'S.append(object) -- append object to the end of the sequence'
        self.insert(len(self), value)

    def reverse(self):
        'S.reverse() -- reverse *IN PLACE*'
        # Swap symmetric pairs; the middle element (odd n) stays put.
        n = len(self)
        for i in range(n//2):
            self[i], self[n-i-1] = self[n-i-1], self[i]

    def extend(self, values):
        'S.extend(iterable) -- extend sequence by appending elements from the iterable'
        for v in values:
            self.append(v)

    def pop(self, index=-1):
        '''S.pop([index]) -> item -- remove and return item at index (default last).
           Raise IndexError if list is empty or index is out of range.
        '''
        v = self[index]
        del self[index]
        return v

    def remove(self, value):
        '''S.remove(value) -- remove first occurrence of value.
           Raise ValueError if the value is not present.
        '''
        del self[self.index(value)]

    def __iadd__(self, values):
        self.extend(values)
        return self

MutableSequence.register(list)
# -*- coding: utf-8 -*- """ requests.session ~~~~~~~~~~~~~~~~ This module provides a Session object to manage and persist settings across requests (cookies, auth, proxies). """ import os from collections import Mapping from datetime import datetime from .auth import _basic_auth_str from .compat import cookielib, OrderedDict, urljoin, urlparse from .cookies import ( cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT from .hooks import default_hooks, dispatch_hook from ._internal_utils import to_native_string from .utils import to_key_val_list, default_headers from .exceptions import ( TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) from .packages.urllib3._collections import RecentlyUsedContainer from .structures import CaseInsensitiveDict from .adapters import HTTPAdapter from .utils import ( requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, get_auth_from_url ) from .status_codes import codes # formerly defined here, reexposed here for backward compatibility from .models import REDIRECT_STATI REDIRECT_CACHE_SIZE = 1000 def merge_setting(request_setting, session_setting, dict_class=OrderedDict): """Determines appropriate setting for a given request, taking into account the explicit setting on that request, and the setting in the session. If a setting is a dictionary, they will be merged together using `dict_class` """ if session_setting is None: return request_setting if request_setting is None: return session_setting # Bypass if not a dictionary (e.g. verify) if not ( isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) ): return request_setting merged_setting = dict_class(to_key_val_list(session_setting)) merged_setting.update(to_key_val_list(request_setting)) # Remove keys that are set to None. Extract keys first to avoid altering # the dictionary during iteration. 
none_keys = [k for (k, v) in merged_setting.items() if v is None] for key in none_keys: del merged_setting[key] return merged_setting def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): """Properly merges both requests and session hooks. This is necessary because when request_hooks == {'response': []}, the merge breaks Session hooks entirely. """ if session_hooks is None or session_hooks.get('response') == []: return request_hooks if request_hooks is None or request_hooks.get('response') == []: return session_hooks return merge_setting(request_hooks, session_hooks, dict_class) class SessionRedirectMixin(object): def resolve_redirects(self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None, **adapter_kwargs): """Receives a Response. Returns a generator of Responses.""" i = 0 hist = [] # keep track of history while resp.is_redirect: prepared_request = req.copy() if i > 0: # Update history and keep track of redirects. hist.append(resp) new_hist = list(hist) resp.history = new_hist try: resp.content # Consume socket so it can be released except (ChunkedEncodingError, ContentDecodingError, RuntimeError): resp.raw.read(decode_content=False) if i >= self.max_redirects: raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp) # Release the connection back into the pool. resp.close() url = resp.headers['location'] # Handle redirection without scheme (see: RFC 1808 Section 4) if url.startswith('//'): parsed_rurl = urlparse(resp.url) url = '%s:%s' % (parsed_rurl.scheme, url) # The scheme should be lower case... parsed = urlparse(url) url = parsed.geturl() # Facilitate relative 'location' headers, as allowed by RFC 7231. # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') # Compliant with RFC3986, we percent encode the url. 
if not parsed.netloc: url = urljoin(resp.url, requote_uri(url)) else: url = requote_uri(url) prepared_request.url = to_native_string(url) # Cache the url, unless it redirects to itself. if resp.is_permanent_redirect and req.url != prepared_request.url: self.redirect_cache[req.url] = prepared_request.url self.rebuild_method(prepared_request, resp) # https://github.com/kennethreitz/requests/issues/1084 if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): # https://github.com/kennethreitz/requests/issues/3490 purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') for header in purged_headers: prepared_request.headers.pop(header, None) prepared_request.body = None headers = prepared_request.headers try: del headers['Cookie'] except KeyError: pass # Extract any cookies sent on the response to the cookiejar # in the new request. Because we've mutated our copied prepared # request, use the old one that we haven't yet touched. extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) merge_cookies(prepared_request._cookies, self.cookies) prepared_request.prepare_cookies(prepared_request._cookies) # Rebuild auth and proxy information. proxies = self.rebuild_proxies(prepared_request, proxies) self.rebuild_auth(prepared_request, resp) # Override the original request. req = prepared_request resp = self.send( req, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, allow_redirects=False, **adapter_kwargs ) extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) i += 1 yield resp def rebuild_auth(self, prepared_request, response): """When being redirected we may want to strip authentication from the request to avoid leaking credentials. This method intelligently removes and reapplies authentication where possible to avoid credential loss. 
""" headers = prepared_request.headers url = prepared_request.url if 'Authorization' in headers: # If we get redirected to a new host, we should strip out any # authentication headers. original_parsed = urlparse(response.request.url) redirect_parsed = urlparse(url) if (original_parsed.hostname != redirect_parsed.hostname): del headers['Authorization'] # .netrc might have more auth for us on our new host. new_auth = get_netrc_auth(url) if self.trust_env else None if new_auth is not None: prepared_request.prepare_auth(new_auth) return def rebuild_proxies(self, prepared_request, proxies): """This method re-evaluates the proxy configuration by considering the environment variables. If we are redirected to a URL covered by NO_PROXY, we strip the proxy configuration. Otherwise, we set missing proxy keys for this URL (in case they were stripped by a previous redirect). This method also replaces the Proxy-Authorization header where necessary. :rtype: dict """ headers = prepared_request.headers url = prepared_request.url scheme = urlparse(url).scheme new_proxies = proxies.copy() if proxies is not None else {} if self.trust_env and not should_bypass_proxies(url): environ_proxies = get_environ_proxies(url) proxy = environ_proxies.get(scheme, environ_proxies.get('all')) if proxy: new_proxies.setdefault(scheme, proxy) if 'Proxy-Authorization' in headers: del headers['Proxy-Authorization'] try: username, password = get_auth_from_url(new_proxies[scheme]) except KeyError: username, password = None, None if username and password: headers['Proxy-Authorization'] = _basic_auth_str(username, password) return new_proxies def rebuild_method(self, prepared_request, response): """When being redirected we may want to change the method of the request based on certain specs or browser behavior. 
""" method = prepared_request.method # http://tools.ietf.org/html/rfc7231#section-6.4.4 if response.status_code == codes.see_other and method != 'HEAD': method = 'GET' # Do what the browsers do, despite standards... # First, turn 302s into GETs. if response.status_code == codes.found and method != 'HEAD': method = 'GET' # Second, if a POST is responded to with a 301, turn it into a GET. # This bizarre behaviour is explained in Issue 1704. if response.status_code == codes.moved and method == 'POST': method = 'GET' prepared_request.method = method class Session(SessionRedirectMixin): """A Requests session. Provides cookie persistence, connection-pooling, and configuration. Basic Usage:: >>> import requests >>> s = requests.Session() >>> s.get('http://httpbin.org/get') <Response [200]> Or as a context manager:: >>> with requests.Session() as s: >>> s.get('http://httpbin.org/get') <Response [200]> """ __attrs__ = [ 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify', 'cert', 'prefetch', 'adapters', 'stream', 'trust_env', 'max_redirects', ] def __init__(self): #: A case-insensitive dictionary of headers to be sent on each #: :class:`Request <Request>` sent from this #: :class:`Session <Session>`. self.headers = default_headers() #: Default Authentication tuple or object to attach to #: :class:`Request <Request>`. self.auth = None #: Dictionary mapping protocol or protocol and host to the URL of the proxy #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to #: be used on each :class:`Request <Request>`. self.proxies = {} #: Event-handling hooks. self.hooks = default_hooks() #: Dictionary of querystring data to attach to each #: :class:`Request <Request>`. The dictionary values may be lists for #: representing multivalued query parameters. self.params = {} #: Stream response content default. self.stream = False #: SSL Verification default. self.verify = True #: SSL client certificate default. 
self.cert = None #: Maximum number of redirects allowed. If the request exceeds this #: limit, a :class:`TooManyRedirects` exception is raised. #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is #: 30. self.max_redirects = DEFAULT_REDIRECT_LIMIT #: Trust environment settings for proxy configuration, default #: authentication and similar. self.trust_env = True #: A CookieJar containing all currently outstanding cookies set on this #: session. By default it is a #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but #: may be any other ``cookielib.CookieJar`` compatible object. self.cookies = cookiejar_from_dict({}) # Default connection adapters. self.adapters = OrderedDict() self.mount('https://', HTTPAdapter()) self.mount('http://', HTTPAdapter()) # Only store 1000 redirects to prevent using infinite memory self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE) def __enter__(self): return self def __exit__(self, *args): self.close() def prepare_request(self, request): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it. The :class:`PreparedRequest` has settings merged from the :class:`Request <Request>` instance and those of the :class:`Session`. :param request: :class:`Request` instance to prepare with this session's settings. :rtype: requests.PreparedRequest """ cookies = request.cookies or {} # Bootstrap CookieJar. if not isinstance(cookies, cookielib.CookieJar): cookies = cookiejar_from_dict(cookies) # Merge with session cookies merged_cookies = merge_cookies( merge_cookies(RequestsCookieJar(), self.cookies), cookies) # Set environment's basic authentication if not explicitly set. 
        # --- tail of prepare_request() (def header above this chunk) ---
        # Session-level auth loses to request-level auth; netrc is the
        # last resort and only consulted when trust_env is set.
        auth = request.auth
        if self.trust_env and not auth and not self.auth:
            auth = get_netrc_auth(request.url)

        p = PreparedRequest()
        p.prepare(
            method=request.method.upper(),
            url=request.url,
            files=request.files,
            data=request.data,
            json=request.json,
            # Request-level settings win over session-level ones on merge.
            headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
            params=merge_setting(request.params, self.params),
            auth=merge_setting(auth, self.auth),
            cookies=merged_cookies,
            hooks=merge_hooks(request.hooks, self.hooks),
        )
        return p

    def request(self, method, url,
        params=None,
        data=None,
        headers=None,
        cookies=None,
        files=None,
        auth=None,
        timeout=None,
        allow_redirects=True,
        proxies=None,
        hooks=None,
        stream=None,
        verify=None,
        cert=None,
        json=None):
        """Constructs a :class:`Request <Request>`, prepares it and sends it.
        Returns :class:`Response <Response>` object.

        :param method: method for the new :class:`Request` object.
        :param url: URL for the new :class:`Request` object.
        :param params: (optional) Dictionary or bytes to be sent in the query
            string for the :class:`Request`.
        :param data: (optional) Dictionary, bytes, or file-like object to send
            in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the
            :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the
            :class:`Request`.
        :param cookies: (optional) Dict or CookieJar object to send with the
            :class:`Request`.
        :param files: (optional) Dictionary of ``'filename': file-like-objects``
            for multipart encoding upload.
        :param auth: (optional) Auth tuple or callable to enable
            Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param allow_redirects: (optional) Set to True by default.
        :type allow_redirects: bool
        :param proxies: (optional) Dictionary mapping protocol or protocol and
            hostname to the URL of the proxy.
        :param stream: (optional) whether to immediately download the response
            content. Defaults to ``False``.
        :param verify: (optional) whether the SSL cert will be verified.
            A CA_BUNDLE path can also be provided. Defaults to ``True``.
        :param cert: (optional) if String, path to ssl client cert file (.pem).
            If Tuple, ('cert', 'key') pair.
        :rtype: requests.Response
        """
        # Create the Request.
        req = Request(
            method = method.upper(),
            url = url,
            headers = headers,
            files = files,
            data = data or {},
            json = json,
            params = params or {},
            auth = auth,
            cookies = cookies,
            hooks = hooks,
        )
        prep = self.prepare_request(req)

        proxies = proxies or {}

        settings = self.merge_environment_settings(
            prep.url, proxies, stream, verify, cert
        )

        # Send the request.
        send_kwargs = {
            'timeout': timeout,
            'allow_redirects': allow_redirects,
        }
        send_kwargs.update(settings)
        resp = self.send(prep, **send_kwargs)

        return resp

    def get(self, url, **kwargs):
        r"""Sends a GET request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('GET', url, **kwargs)

    def options(self, url, **kwargs):
        r"""Sends a OPTIONS request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('OPTIONS', url, **kwargs)

    def head(self, url, **kwargs):
        r"""Sends a HEAD request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        # HEAD requests do not follow redirects by default (per RFC intent:
        # callers typically want the headers of the named resource itself).
        kwargs.setdefault('allow_redirects', False)
        return self.request('HEAD', url, **kwargs)

    def post(self, url, data=None, json=None, **kwargs):
        r"""Sends a POST request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send
            in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the
            :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        return self.request('POST', url, data=data, json=json, **kwargs)

    def put(self, url, data=None, **kwargs):
        r"""Sends a PUT request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send
            in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        return self.request('PUT', url, data=data, **kwargs)

    def patch(self, url, data=None, **kwargs):
        r"""Sends a PATCH request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send
            in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        return self.request('PATCH', url, data=data, **kwargs)

    def delete(self, url, **kwargs):
        r"""Sends a DELETE request. Returns :class:`Response` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        return self.request('DELETE', url, **kwargs)

    def send(self, request, **kwargs):
        """
        Send a given PreparedRequest.

        :rtype: requests.Response
        """
        # Set defaults that the hooks can utilize to ensure they always have
        # the correct parameters to reproduce the previous request.
        kwargs.setdefault('stream', self.stream)
        kwargs.setdefault('verify', self.verify)
        kwargs.setdefault('cert', self.cert)
        kwargs.setdefault('proxies', self.proxies)

        # It's possible that users might accidentally send a Request object.
        # Guard against that specific failure case.
        if isinstance(request, Request):
            raise ValueError('You can only send PreparedRequests.')

        # Set up variables needed for resolve_redirects and dispatching of hooks
        allow_redirects = kwargs.pop('allow_redirects', True)
        stream = kwargs.get('stream')
        hooks = request.hooks

        # Resolve URL in redirect cache, if available.
        if allow_redirects:
            checked_urls = set()
            # checked_urls guards against cycles in the cached redirect chain:
            # revisiting a URL would otherwise loop forever.
            while request.url in self.redirect_cache:
                checked_urls.add(request.url)
                new_url = self.redirect_cache.get(request.url)
                if new_url in checked_urls:
                    break
                request.url = new_url

        # Get the appropriate adapter to use
        adapter = self.get_adapter(url=request.url)

        # Start time (approximately) of the request
        start = datetime.utcnow()

        # Send the request
        r = adapter.send(request, **kwargs)

        # Total elapsed time of the request (approximately)
        r.elapsed = datetime.utcnow() - start

        # Response manipulation hooks
        r = dispatch_hook('response', hooks, r, **kwargs)

        # Persist cookies
        if r.history:
            # If the hooks create history then we want those cookies too
            for resp in r.history:
                extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
        extract_cookies_to_jar(self.cookies, request, r.raw)

        # Redirect resolving generator.
        gen = self.resolve_redirects(r, request, **kwargs)

        # Resolve redirects if allowed.
        history = [resp for resp in gen] if allow_redirects else []

        # Shuffle things around if there's history.
        if history:
            # Insert the first (original) request at the start
            history.insert(0, r)
            # Get the last request made
            r = history.pop()
            r.history = history

        if not stream:
            # Accessed purely for its side effect: force the response body to
            # be consumed so the connection can be released back to the pool.
            r.content

        return r

    def merge_environment_settings(self, url, proxies, stream, verify, cert):
        """
        Check the environment and merge it with some settings.

        :rtype: dict
        """
        # Gather clues from the surrounding environment.
        if self.trust_env:
            # Set environment's proxies.
            env_proxies = get_environ_proxies(url) or {}
            for (k, v) in env_proxies.items():
                proxies.setdefault(k, v)

            # Look for requests environment configuration and be compatible
            # with cURL.
            if verify is True or verify is None:
                verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
                          os.environ.get('CURL_CA_BUNDLE'))

        # Merge all the kwargs.
        proxies = merge_setting(proxies, self.proxies)
        stream = merge_setting(stream, self.stream)
        verify = merge_setting(verify, self.verify)
        cert = merge_setting(cert, self.cert)

        return {'verify': verify, 'proxies': proxies, 'stream': stream,
                'cert': cert}

    def get_adapter(self, url):
        """
        Returns the appropriate connection adapter for the given URL.

        :rtype: requests.adapters.BaseAdapter
        """
        # Prefix match against mounted adapters; mount() keeps longer
        # (more specific) prefixes earlier in iteration order.
        for (prefix, adapter) in self.adapters.items():
            if url.lower().startswith(prefix):
                return adapter

        # Nothing matches :-/
        raise InvalidSchema("No connection adapters were found for '%s'" % url)

    def close(self):
        """Closes all adapters and as such the session"""
        for v in self.adapters.values():
            v.close()

    def mount(self, prefix, adapter):
        """Registers a connection adapter to a prefix.

        Adapters are sorted in descending order by key length.
        """
        self.adapters[prefix] = adapter
        # Re-insert every shorter key after the new one so that dict
        # iteration visits longer (more specific) prefixes first.
        keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]

        for key in keys_to_move:
            self.adapters[key] = self.adapters.pop(key)

    def __getstate__(self):
        # redirect_cache is a RecentlyUsedContainer, which is not picklable
        # directly; serialize it as a plain dict.
        state = dict((attr, getattr(self, attr, None)) for attr in
                     self.__attrs__)
        state['redirect_cache'] = dict(self.redirect_cache)
        return state

    def __setstate__(self, state):
        redirect_cache = state.pop('redirect_cache', {})
        for attr, value in state.items():
            setattr(self, attr, value)

        # Rebuild the bounded LRU container from the pickled plain dict.
        self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
        for redirect, to in redirect_cache.items():
            self.redirect_cache[redirect] = to


def session():
    """
    Returns a :class:`Session` for context-management.

    :rtype: Session
    """
    return Session()
"""Main Lomb-Scargle Implementation""" import numpy as np from .implementations import lombscargle, available_methods from .implementations.mle import periodic_fit from . import _statistics from ... import units def has_units(obj): return hasattr(obj, 'unit') def get_unit(obj): return getattr(obj, 'unit', 1) def strip_units(*arrs): strip = lambda a: None if a is None else np.asarray(a) if len(arrs) == 1: return strip(arrs[0]) else: return map(strip, arrs) class LombScargle: """Compute the Lomb-Scargle Periodogram. This implementations here are based on code presented in [1]_ and [2]_; if you use this functionality in an academic application, citation of those works would be appreciated. Parameters ---------- t : array_like or Quantity sequence of observation times y : array_like or Quantity sequence of observations associated with times t dy : float, array_like or Quantity (optional) error or sequence of observational errors associated with times t fit_mean : bool (optional, default=True) if True, include a constant offset as part of the model at each frequency. This can lead to more accurate results, especially in the case of incomplete phase coverage. center_data : bool (optional, default=True) if True, pre-center the data by subtracting the weighted mean of the input data. This is especially important if fit_mean = False nterms : int (optional, default=1) number of terms to use in the Fourier fit normalization : {'standard', 'model', 'log', 'psd'}, optional Normalization to use for the periodogram. 
Examples -------- Generate noisy periodic data: >>> rand = np.random.RandomState(42) >>> t = 100 * rand.rand(100) >>> y = np.sin(2 * np.pi * t) + rand.randn(100) Compute the Lomb-Scargle periodogram on an automatically-determined frequency grid & find the frequency of max power: >>> frequency, power = LombScargle(t, y).autopower() >>> frequency[np.argmax(power)] # doctest: +FLOAT_CMP 1.0016662310392956 Compute the Lomb-Scargle periodogram at a user-specified frequency grid: >>> freq = np.arange(0.8, 1.3, 0.1) >>> LombScargle(t, y).power(freq) # doctest: +FLOAT_CMP array([0.0204304 , 0.01393845, 0.35552682, 0.01358029, 0.03083737]) If the inputs are astropy Quantities with units, the units will be validated and the outputs will also be Quantities with appropriate units: >>> from astropy import units as u >>> t = t * u.s >>> y = y * u.mag >>> frequency, power = LombScargle(t, y).autopower() >>> frequency.unit Unit("1 / s") >>> power.unit Unit(dimensionless) Note here that the Lomb-Scargle power is always a unitless quantity, because it is related to the :math:`\\chi^2` of the best-fit periodic model at each frequency. References ---------- .. [1] Vanderplas, J., Connolly, A. Ivezic, Z. & Gray, A. *Introduction to astroML: Machine learning for astrophysics*. Proceedings of the Conference on Intelligent Data Understanding (2012) .. [2] VanderPlas, J. & Ivezic, Z. *Periodograms for Multiband Astronomical Time Series*. 
ApJ 812.1:18 (2015) """ available_methods = available_methods() def __init__(self, t, y, dy=None, fit_mean=True, center_data=True, nterms=1, normalization='standard'): self.t, self.y, self.dy = self._validate_inputs(t, y, dy) self.fit_mean = fit_mean self.center_data = center_data self.nterms = nterms self.normalization = normalization def _validate_inputs(self, t, y, dy): # Validate shapes of inputs if dy is None: t, y = np.broadcast_arrays(t, y, subok=True) else: t, y, dy = np.broadcast_arrays(t, y, dy, subok=True) if t.ndim != 1: raise ValueError("Inputs (t, y, dy) must be 1-dimensional") # validate units of inputs if any is a Quantity if any(has_units(arr) for arr in (t, y, dy)): t, y = map(units.Quantity, (t, y)) if dy is not None: dy = units.Quantity(dy) try: dy = units.Quantity(dy, unit=y.unit) except units.UnitConversionError: raise ValueError("Units of dy not equivalent " "to units of y") return t, y, dy def _validate_frequency(self, frequency): frequency = np.asanyarray(frequency) if has_units(self.t): frequency = units.Quantity(frequency) try: frequency = units.Quantity(frequency, unit=1./self.t.unit) except units.UnitConversionError: raise ValueError("Units of frequency not equivalent to " "units of 1/t") else: if has_units(frequency): raise ValueError("frequency have units while 1/t doesn't.") return frequency def _validate_t(self, t): t = np.asanyarray(t) if has_units(self.t): t = units.Quantity(t) try: t = units.Quantity(t, unit=self.t.unit) except units.UnitConversionError: raise ValueError("Units of t not equivalent to " "units of input self.t") return t def _power_unit(self, norm): if has_units(self.y): if self.dy is None and norm == 'psd': return self.y.unit ** 2 else: return units.dimensionless_unscaled else: return 1 def autofrequency(self, samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None, return_freq_limits=False): """Determine a suitable frequency grid for data. 
Note that this assumes the peak width is driven by the observational baseline, which is generally a good assumption when the baseline is much larger than the oscillation period. If you are searching for periods longer than the baseline of your observations, this may not perform well. Even with a large baseline, be aware that the maximum frequency returned is based on the concept of "average Nyquist frequency", which may not be useful for irregularly-sampled data. The maximum frequency can be adjusted via the nyquist_factor argument, or through the maximum_frequency argument. Parameters ---------- samples_per_peak : float (optional, default=5) The approximate number of desired samples across the typical peak nyquist_factor : float (optional, default=5) The multiple of the average nyquist frequency used to choose the maximum frequency if maximum_frequency is not provided. minimum_frequency : float (optional) If specified, then use this minimum frequency rather than one chosen based on the size of the baseline. maximum_frequency : float (optional) If specified, then use this maximum frequency rather than one chosen based on the average nyquist frequency. return_freq_limits : bool (optional) if True, return only the frequency limits rather than the full frequency grid. 
Returns ------- frequency : ndarray or Quantity The heuristically-determined optimal frequency bin """ baseline = self.t.max() - self.t.min() n_samples = self.t.size df = 1.0 / baseline / samples_per_peak if minimum_frequency is None: minimum_frequency = 0.5 * df if maximum_frequency is None: avg_nyquist = 0.5 * n_samples / baseline maximum_frequency = nyquist_factor * avg_nyquist Nf = 1 + int(np.round((maximum_frequency - minimum_frequency) / df)) if return_freq_limits: return minimum_frequency, minimum_frequency + df * (Nf - 1) else: return minimum_frequency + df * np.arange(Nf) def autopower(self, method='auto', method_kwds=None, normalization=None, samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None): """Compute Lomb-Scargle power at automatically-determined frequencies. Parameters ---------- method : string (optional) specify the lomb scargle implementation to use. Options are: - 'auto': choose the best method based on the input - 'fast': use the O[N log N] fast method. Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. - 'slow': use the O[N^2] pure-python implementation - 'cython': use the O[N^2] cython implementation. This is slightly faster than method='slow', but much more memory efficient. - 'chi2': use the O[N^2] chi2/linear-fitting implementation - 'fastchi2': use the O[N log N] chi2 implementation. Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. - 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2] implementation written in C. Note that this does not support heteroskedastic errors. method_kwds : dict (optional) additional keywords to pass to the lomb-scargle method normalization : {'standard', 'model', 'log', 'psd'}, optional If specified, override the normalization specified at instantiation. 
samples_per_peak : float (optional, default=5) The approximate number of desired samples across the typical peak nyquist_factor : float (optional, default=5) The multiple of the average nyquist frequency used to choose the maximum frequency if maximum_frequency is not provided. minimum_frequency : float (optional) If specified, then use this minimum frequency rather than one chosen based on the size of the baseline. maximum_frequency : float (optional) If specified, then use this maximum frequency rather than one chosen based on the average nyquist frequency. Returns ------- frequency, power : ndarrays The frequency and Lomb-Scargle power """ frequency = self.autofrequency(samples_per_peak=samples_per_peak, nyquist_factor=nyquist_factor, minimum_frequency=minimum_frequency, maximum_frequency=maximum_frequency) power = self.power(frequency, normalization=normalization, method=method, method_kwds=method_kwds, assume_regular_frequency=True) return frequency, power def power(self, frequency, normalization=None, method='auto', assume_regular_frequency=False, method_kwds=None): """Compute the Lomb-Scargle power at the given frequencies. Parameters ---------- frequency : array_like or Quantity frequencies (not angular frequencies) at which to evaluate the periodogram. Note that in order to use method='fast', frequencies must be regularly-spaced. method : string (optional) specify the lomb scargle implementation to use. Options are: - 'auto': choose the best method based on the input - 'fast': use the O[N log N] fast method. Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. - 'slow': use the O[N^2] pure-python implementation - 'cython': use the O[N^2] cython implementation. This is slightly faster than method='slow', but much more memory efficient. - 'chi2': use the O[N^2] chi2/linear-fitting implementation - 'fastchi2': use the O[N log N] chi2 implementation. 
Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. - 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2] implementation written in C. Note that this does not support heteroskedastic errors. assume_regular_frequency : bool (optional) if True, assume that the input frequency is of the form freq = f0 + df * np.arange(N). Only referenced if method is 'auto' or 'fast'. normalization : {'standard', 'model', 'log', 'psd'}, optional If specified, override the normalization specified at instantiation. fit_mean : bool (optional, default=True) If True, include a constant offset as part of the model at each frequency. This can lead to more accurate results, especially in the case of incomplete phase coverage. center_data : bool (optional, default=True) If True, pre-center the data by subtracting the weighted mean of the input data. This is especially important if fit_mean = False. method_kwds : dict (optional) additional keywords to pass to the lomb-scargle method Returns ------- power : ndarray The Lomb-Scargle power at the specified frequency """ if normalization is None: normalization = self.normalization frequency = self._validate_frequency(frequency) power = lombscargle(*strip_units(self.t, self.y, self.dy), frequency=strip_units(frequency), center_data=self.center_data, fit_mean=self.fit_mean, nterms=self.nterms, normalization=normalization, method=method, method_kwds=method_kwds, assume_regular_frequency=assume_regular_frequency) return power * self._power_unit(normalization) def model(self, t, frequency): """Compute the Lomb-Scargle model at the given frequency. 
Parameters ---------- t : array_like or Quantity, length n_samples times at which to compute the model frequency : float the frequency for the model Returns ------- y : np.ndarray, length n_samples The model fit corresponding to the input times """ frequency = self._validate_frequency(frequency) t = self._validate_t(t) y_fit = periodic_fit(*strip_units(self.t, self.y, self.dy), frequency=strip_units(frequency), t_fit=strip_units(t), center_data=self.center_data, fit_mean=self.fit_mean, nterms=self.nterms) return y_fit * get_unit(self.y) def distribution(self, power, cumulative=False): """Expected periodogram distribution under the null hypothesis. This computes the expected probability distribution or cumulative probability distribution of periodogram power, under the null hypothesis of a non-varying signal with Gaussian noise. Note that this is not the same as the expected distribution of peak values; for that see the ``false_alarm_probability()`` method. Parameters ---------- power : array_like The periodogram power at which to compute the distribution. cumulative : bool (optional) If True, then return the cumulative distribution. See Also -------- false_alarm_probability false_alarm_level Returns ------- dist : np.ndarray The probability density or cumulative probability associated with the provided powers. """ dH = 1 if self.fit_mean or self.center_data else 0 dK = dH + 2 * self.nterms dist = _statistics.cdf_single if cumulative else _statistics.pdf_single return dist(power, len(self.t), self.normalization, dH=dH, dK=dK) def false_alarm_probability(self, power, method='baluev', samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None, method_kwds=None): """False alarm probability of periodogram maxima under the null hypothesis. This gives an estimate of the false alarm probability given the height of the largest peak in the periodogram, based on the null hypothesis of non-varying data with Gaussian noise. 
Parameters ---------- power : array-like The periodogram value. method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional The approximation method to use. maximum_frequency : float The maximum frequency of the periodogram. method_kwds : dict (optional) Additional method-specific keywords. Returns ------- false_alarm_probability : np.ndarray The false alarm probability Notes ----- The true probability distribution for the largest peak cannot be determined analytically, so each method here provides an approximation to the value. The available methods are: - "baluev" (default): the upper-limit to the alias-free probability, using the approach of Baluev (2008) [1]_. - "davies" : the Davies upper bound from Baluev (2008) [1]_. - "naive" : the approximate probability based on an estimated effective number of independent frequencies. - "bootstrap" : the approximate probability based on bootstrap resamplings of the input data. Note also that for normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. See Also -------- distribution false_alarm_level References ---------- .. [1] Baluev, R.V. 
MNRAS 385, 1279 (2008) """ if self.nterms != 1: raise NotImplementedError("false alarm probability is not " "implemented for multiterm periodograms.") if not (self.fit_mean or self.center_data): raise NotImplementedError("false alarm probability is implemented " "only for periodograms of centered data.") fmin, fmax = self.autofrequency(samples_per_peak=samples_per_peak, nyquist_factor=nyquist_factor, minimum_frequency=minimum_frequency, maximum_frequency=maximum_frequency, return_freq_limits=True) return _statistics.false_alarm_probability(power, fmax=fmax, t=self.t, y=self.y, dy=self.dy, normalization=self.normalization, method=method, method_kwds=method_kwds) def false_alarm_level(self, false_alarm_probability, method='baluev', samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None, method_kwds=None): """Level of maximum at a given false alarm probability. This gives an estimate of the periodogram level corresponding to a specified false alarm probability for the largest peak, assuming a null hypothesis of non-varying data with Gaussian noise. Parameters ---------- false_alarm_probability : array-like The false alarm probability (0 < fap < 1). maximum_frequency : float The maximum frequency of the periodogram. method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional The approximation method to use; default='baluev'. method_kwds : dict, optional Additional method-specific keywords. Returns ------- power : np.ndarray The periodogram peak height corresponding to the specified false alarm probability. Notes ----- The true probability distribution for the largest peak cannot be determined analytically, so each method here provides an approximation to the value. The available methods are: - "baluev" (default): the upper-limit to the alias-free probability, using the approach of Baluev (2008) [1]_. - "davies" : the Davies upper bound from Baluev (2008) [1]_. 
- "naive" : the approximate probability based on an estimated effective number of independent frequencies. - "bootstrap" : the approximate probability based on bootstrap resamplings of the input data. Note also that for normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. See Also -------- distribution false_alarm_probability References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ if self.nterms != 1: raise NotImplementedError("false alarm probability is not " "implemented for multiterm periodograms.") if not (self.fit_mean or self.center_data): raise NotImplementedError("false alarm probability is implemented " "only for periodograms of centered data.") fmin, fmax = self.autofrequency(samples_per_peak=samples_per_peak, nyquist_factor=nyquist_factor, minimum_frequency=minimum_frequency, maximum_frequency=maximum_frequency, return_freq_limits=True) return _statistics.false_alarm_level(false_alarm_probability, fmax=fmax, t=self.t, y=self.y, dy=self.dy, normalization=self.normalization, method=method, method_kwds=method_kwds)
"""Apache Configuration based off of Augeas Configurator.""" # pylint: disable=too-many-lines import itertools import logging import os import re import shutil import socket import subprocess import zope.interface from acme import challenges from letsencrypt import achallenges from letsencrypt import errors from letsencrypt import interfaces from letsencrypt import le_util from letsencrypt.plugins import common from letsencrypt_apache import augeas_configurator from letsencrypt_apache import constants from letsencrypt_apache import display_ops from letsencrypt_apache import dvsni from letsencrypt_apache import obj from letsencrypt_apache import parser logger = logging.getLogger(__name__) # TODO: Augeas sections ie. <VirtualHost>, <IfModule> beginning and closing # tags need to be the same case, otherwise Augeas doesn't recognize them. # This is not able to be completely remedied by regular expressions because # Augeas views <VirtualHost> </Virtualhost> as an error. This will just # require another check_parsing_errors() after all files are included... # (after a find_directive search is executed currently). It can be a one # time check however because all of LE's transactions will ensure # only properly formed sections are added. # Note: This protocol works for filenames with spaces in it, the sites are # properly set up and directives are changed appropriately, but Apache won't # recognize names in sites-enabled that have spaces. These are not added to the # Apache configuration. It may be wise to warn the user if they are trying # to use vhost filenames that contain spaces and offer to change ' ' to '_' # Note: FILEPATHS and changes to files are transactional. They are copied # over before the updates are made to the existing files. NEW_FILES is # transactional due to the use of register_file_creation() # TODO: Verify permissions on configuration root... it is easier than # checking permissions on each of the relative directories and less error # prone. 
# TODO: Write a server protocol finder. Listen <port> <protocol> or # Protocol <protocol>. This can verify partial setups are correct # TODO: Add directives to sites-enabled... not sites-available. # sites-available doesn't allow immediate find_dir search even with save() # and load() class ApacheConfigurator(augeas_configurator.AugeasConfigurator): # pylint: disable=too-many-instance-attributes,too-many-public-methods """Apache configurator. State of Configurator: This code has been been tested and built for Ubuntu 14.04 Apache 2.4 and it works for Ubuntu 12.04 Apache 2.2 :ivar config: Configuration. :type config: :class:`~letsencrypt.interfaces.IConfig` :ivar parser: Handles low level parsing :type parser: :class:`~letsencrypt_apache.parser` :ivar tup version: version of Apache :ivar list vhosts: All vhosts found in the configuration (:class:`list` of :class:`~letsencrypt_apache.obj.VirtualHost`) :ivar dict assoc: Mapping between domains and vhosts """ zope.interface.implements(interfaces.IAuthenticator, interfaces.IInstaller) zope.interface.classProvides(interfaces.IPluginFactory) description = "Apache Web Server - Alpha" @classmethod def add_parser_arguments(cls, add): add("ctl", default=constants.CLI_DEFAULTS["ctl"], help="Path to the 'apache2ctl' binary, used for 'configtest', " "retrieving the Apache2 version number, and initialization " "parameters.") add("enmod", default=constants.CLI_DEFAULTS["enmod"], help="Path to the Apache 'a2enmod' binary.") add("dismod", default=constants.CLI_DEFAULTS["dismod"], help="Path to the Apache 'a2enmod' binary.") add("init-script", default=constants.CLI_DEFAULTS["init_script"], help="Path to the Apache init script (used for server " "reload/restart).") add("le-vhost-ext", default=constants.CLI_DEFAULTS["le_vhost_ext"], help="SSL vhost configuration extension.") add("server-root", default=constants.CLI_DEFAULTS["server_root"], help="Apache server root directory.") def __init__(self, *args, **kwargs): """Initialize an Apache 
Configurator. :param tup version: version of Apache as a tuple (2, 4, 7) (used mostly for unittesting) """ version = kwargs.pop("version", None) super(ApacheConfigurator, self).__init__(*args, **kwargs) # Add name_server association dict self.assoc = dict() # Outstanding challenges self._chall_out = set() # These will be set in the prepare function self.parser = None self.version = version self.vhosts = None self._enhance_func = {"redirect": self._enable_redirect} @property def mod_ssl_conf(self): """Full absolute path to SSL configuration file.""" return os.path.join(self.config.config_dir, constants.MOD_SSL_CONF_DEST) def prepare(self): """Prepare the authenticator/installer. :raises .errors.NoInstallationError: If Apache configs cannot be found :raises .errors.MisconfigurationError: If Apache is misconfigured :raises .errors.NotSupportedError: If Apache version is not supported :raises .errors.PluginError: If there is any other error """ # Make sure configuration is valid self.config_test() self.parser = parser.ApacheParser( self.aug, self.conf("server-root"), self.conf("ctl")) # Check for errors in parsing files with Augeas self.check_parsing_errors("httpd.aug") # Set Version if self.version is None: self.version = self.get_version() if self.version < (2, 2): raise errors.NotSupportedError( "Apache Version %s not supported.", str(self.version)) # Get all of the available vhosts self.vhosts = self.get_virtual_hosts() temp_install(self.mod_ssl_conf) def deploy_cert(self, domain, cert_path, key_path, chain_path=None): """Deploys certificate to specified virtual host. Currently tries to find the last directives to deploy the cert in the VHost associated with the given domain. If it can't find the directives, it searches the "included" confs. The function verifies that it has located the three directives and finally modifies them to point to the correct destination. After the certificate is installed, the VirtualHost is enabled if it isn't already. .. 
todo:: Might be nice to remove chain directive if none exists This shouldn't happen within letsencrypt though :raises errors.PluginError: When unable to deploy certificate due to a lack of directives """ vhost = self.choose_vhost(domain) # This is done first so that ssl module is enabled and cert_path, # cert_key... can all be parsed appropriately self.prepare_server_https("443") path = {} path["cert_path"] = self.parser.find_dir( "SSLCertificateFile", None, vhost.path) path["cert_key"] = self.parser.find_dir( "SSLCertificateKeyFile", None, vhost.path) # Only include if a certificate chain is specified if chain_path is not None: path["chain_path"] = self.parser.find_dir( "SSLCertificateChainFile", None, vhost.path) if not path["cert_path"] or not path["cert_key"]: # Throw some can't find all of the directives error" logger.warn( "Cannot find a cert or key directive in %s. " "VirtualHost was not modified", vhost.path) # Presumably break here so that the virtualhost is not modified raise errors.PluginError( "Unable to find cert and/or key directives") logger.info("Deploying Certificate to VirtualHost %s", vhost.filep) # Assign the final directives; order is maintained in find_dir self.aug.set(path["cert_path"][-1], cert_path) self.aug.set(path["cert_key"][-1], key_path) if chain_path is not None: if not path["chain_path"]: self.parser.add_dir( vhost.path, "SSLCertificateChainFile", chain_path) else: self.aug.set(path["chain_path"][-1], chain_path) # Save notes about the transaction that took place self.save_notes += ("Changed vhost at %s with addresses of %s\n" "\tSSLCertificateFile %s\n" "\tSSLCertificateKeyFile %s\n" % (vhost.filep, ", ".join(str(addr) for addr in vhost.addrs), cert_path, key_path)) if chain_path is not None: self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path # Make sure vhost is enabled if not vhost.enabled: self.enable_site(vhost) def choose_vhost(self, target_name): """Chooses a virtual host based on the given domain name. 
If there is no clear virtual host to be selected, the user is prompted with all available choices. :param str target_name: domain name :returns: ssl vhost associated with name :rtype: :class:`~letsencrypt_apache.obj.VirtualHost` :raises .errors.PluginError: If no vhost is available or chosen """ # Allows for domain names to be associated with a virtual host if target_name in self.assoc: return self.assoc[target_name] # Try to find a reasonable vhost vhost = self._find_best_vhost(target_name) if vhost is not None: if not vhost.ssl: vhost = self.make_vhost_ssl(vhost) self.assoc[target_name] = vhost return vhost return self._choose_vhost_from_list(target_name) def _choose_vhost_from_list(self, target_name): # Select a vhost from a list vhost = display_ops.select_vhost(target_name, self.vhosts) if vhost is None: logger.error( "No vhost exists with servername or alias of: %s. " "No vhost was selected. Please specify servernames " "in the Apache config", target_name) raise errors.PluginError("No vhost selected") elif not vhost.ssl: addrs = self._get_proposed_addrs(vhost, "443") # TODO: Conflicts is too conservative if not any(vhost.enabled and vhost.conflicts(addrs) for vhost in self.vhosts): vhost = self.make_vhost_ssl(vhost) else: logger.error( "The selected vhost would conflict with other HTTPS " "VirtualHosts within Apache. Please select another " "vhost or add ServerNames to your configuration.") raise errors.PluginError( "VirtualHost not able to be selected.") self.assoc[target_name] = vhost return vhost def _find_best_vhost(self, target_name): """Finds the best vhost for a target_name. This does not upgrade a vhost to HTTPS... it only finds the most appropriate vhost for the given target_name. 
:returns: VHost or None """ # Points 4 - Servername SSL # Points 3 - Address name with SSL # Points 2 - Servername no SSL # Points 1 - Address name with no SSL best_candidate = None best_points = 0 for vhost in self.vhosts: if target_name in vhost.get_names(): points = 2 elif any(addr.get_addr() == target_name for addr in vhost.addrs): points = 1 else: # No points given if names can't be found. # This gets hit but doesn't register continue # pragma: no cover if vhost.ssl: points += 2 if points > best_points: best_points = points best_candidate = vhost # No winners here... is there only one reasonable vhost? if best_candidate is None: # reasonable == Not all _default_ addrs reasonable_vhosts = self._non_default_vhosts() if len(reasonable_vhosts) == 1: best_candidate = reasonable_vhosts[0] return best_candidate def _non_default_vhosts(self): """Return all non _default_ only vhosts.""" return [vh for vh in self.vhosts if not all( addr.get_addr() == "_default_" for addr in vh.addrs )] def get_all_names(self): """Returns all names found in the Apache Configuration. :returns: All ServerNames, ServerAliases, and reverse DNS entries for virtual host addresses :rtype: set """ all_names = set() for vhost in self.vhosts: all_names.update(vhost.get_names()) for addr in vhost.addrs: if common.hostname_regex.match(addr.get_addr()): all_names.add(addr.get_addr()) else: name = self.get_name_from_ip(addr) if name: all_names.add(name) return all_names def get_name_from_ip(self, addr): # pylint: disable=no-self-use """Returns a reverse dns name if available. 
:param addr: IP Address :type addr: ~.common.Addr :returns: name or empty string if name cannot be determined :rtype: str """ # If it isn't a private IP, do a reverse DNS lookup if not common.private_ips_regex.match(addr.get_addr()): try: socket.inet_aton(addr.get_addr()) return socket.gethostbyaddr(addr.get_addr())[0] except (socket.error, socket.herror, socket.timeout): pass return "" def _add_servernames(self, host): """Helper function for get_virtual_hosts(). :param host: In progress vhost whose names will be added :type host: :class:`~letsencrypt_apache.obj.VirtualHost` """ # Take the final ServerName as each overrides the previous servername_match = self.parser.find_dir( "ServerName", None, start=host.path, exclude=False) serveralias_match = self.parser.find_dir( "ServerAlias", None, start=host.path, exclude=False) for alias in serveralias_match: host.aliases.add(self.parser.get_arg(alias)) if servername_match: # Get last ServerName as each overwrites the previous host.name = self.parser.get_arg(servername_match[-1]) def _create_vhost(self, path): """Used by get_virtual_hosts to create vhost objects :param str path: Augeas path to virtual host :returns: newly created vhost :rtype: :class:`~letsencrypt_apache.obj.VirtualHost` """ addrs = set() args = self.aug.match(path + "/arg") for arg in args: addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg))) is_ssl = False if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False): is_ssl = True filename = get_file_path(path) is_enabled = self.is_site_enabled(filename) vhost = obj.VirtualHost(filename, path, addrs, is_ssl, is_enabled) self._add_servernames(vhost) return vhost # TODO: make "sites-available" a configurable directory def get_virtual_hosts(self): """Returns list of virtual hosts found in the Apache configuration. 
:returns: List of :class:`~letsencrypt_apache.obj.VirtualHost` objects found in configuration :rtype: list """ # Search sites-available, httpd.conf for possible virtual hosts paths = self.aug.match( ("/files%s/sites-available//*[label()=~regexp('%s')]" % (self.parser.root, parser.case_i("VirtualHost")))) vhs = [] for path in paths: vhs.append(self._create_vhost(path)) return vhs def is_name_vhost(self, target_addr): """Returns if vhost is a name based vhost NameVirtualHost was deprecated in Apache 2.4 as all VirtualHosts are now NameVirtualHosts. If version is earlier than 2.4, check if addr has a NameVirtualHost directive in the Apache config :param letsencrypt_apache.obj.Addr target_addr: vhost address :returns: Success :rtype: bool """ # Mixed and matched wildcard NameVirtualHost with VirtualHost # behavior is undefined. Make sure that an exact match exists # search for NameVirtualHost directive for ip_addr # note ip_addr can be FQDN although Apache does not recommend it return (self.version >= (2, 4) or self.parser.find_dir("NameVirtualHost", str(target_addr))) def add_name_vhost(self, addr): """Adds NameVirtualHost directive for given address. :param addr: Address that will be added as NameVirtualHost directive :type addr: :class:`~letsencrypt_apache.obj.Addr` """ loc = parser.get_aug_path(self.parser.loc["name"]) if addr.get_port() == "443": path = self.parser.add_dir_to_ifmodssl( loc, "NameVirtualHost", [str(addr)]) else: path = self.parser.add_dir(loc, "NameVirtualHost", [str(addr)]) msg = ("Setting %s to be NameBasedVirtualHost\n" "\tDirective added to %s\n" % (addr, path)) logger.debug(msg) self.save_notes += msg def prepare_server_https(self, port, temp=False): """Prepare the server for HTTPS. Make sure that the ssl_module is loaded and that the server is appropriately listening on port. 
:param str port: Port to listen on """ if "ssl_module" not in self.parser.modules: logger.info("Loading mod_ssl into Apache Server") self.enable_mod("ssl", temp=temp) # Check for Listen <port> # Note: This could be made to also look for ip:443 combo if not self.parser.find_dir("Listen", port): logger.debug("No Listen %s directive found. Setting the " "Apache Server to Listen on port %s", port, port) if port == "443": args = [port] else: # Non-standard ports should specify https protocol args = [port, "https"] self.parser.add_dir_to_ifmodssl( parser.get_aug_path( self.parser.loc["listen"]), "Listen", args) self.save_notes += "Added Listen %s directive to %s\n" % ( port, self.parser.loc["listen"]) def make_addrs_sni_ready(self, addrs): """Checks to see if the server is ready for SNI challenges. :param addrs: Addresses to check SNI compatibility :type addrs: :class:`~letsencrypt_apache.obj.Addr` """ # Version 2.4 and later are automatically SNI ready. if self.version >= (2, 4): return for addr in addrs: if not self.is_name_vhost(addr): logger.debug("Setting VirtualHost at %s to be a name " "based virtual host", addr) self.add_name_vhost(addr) def make_vhost_ssl(self, nonssl_vhost): # pylint: disable=too-many-locals """Makes an ssl_vhost version of a nonssl_vhost. Duplicates vhost and adds default ssl options New vhost will reside as (nonssl_vhost.path) + ``letsencrypt_apache.constants.CLI_DEFAULTS["le_vhost_ext"]`` .. note:: This function saves the configuration :param nonssl_vhost: Valid VH that doesn't have SSLEngine on :type nonssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost` :returns: SSL vhost :rtype: :class:`~letsencrypt_apache.obj.VirtualHost` :raises .errors.PluginError: If more than one virtual host is in the file or if plugin is unable to write/read vhost files. 
""" avail_fp = nonssl_vhost.filep ssl_fp = self._get_ssl_vhost_path(avail_fp) self._copy_create_ssl_vhost_skeleton(avail_fp, ssl_fp) # Reload augeas to take into account the new vhost self.aug.load() # Get Vhost augeas path for new vhost vh_p = self.aug.match("/files%s//* [label()=~regexp('%s')]" % (ssl_fp, parser.case_i("VirtualHost"))) if len(vh_p) != 1: logger.error("Error: should only be one vhost in %s", avail_fp) raise errors.PluginError("Only one vhost per file is allowed") else: # This simplifies the process vh_p = vh_p[0] # Update Addresses self._update_ssl_vhosts_addrs(vh_p) # Add directives self._add_dummy_ssl_directives(vh_p) # Log actions and create save notes logger.info("Created an SSL vhost at %s", ssl_fp) self.save_notes += "Created ssl vhost at %s\n" % ssl_fp self.save() # We know the length is one because of the assertion above # Create the Vhost object ssl_vhost = self._create_vhost(vh_p) self.vhosts.append(ssl_vhost) # NOTE: Searches through Augeas seem to ruin changes to directives # The configuration must also be saved before being searched # for the new directives; For these reasons... this is tacked # on after fully creating the new vhost # Now check if addresses need to be added as NameBasedVhost addrs # This is for compliance with versions of Apache < 2.4 self._add_name_vhost_if_necessary(ssl_vhost) return ssl_vhost def _get_ssl_vhost_path(self, non_ssl_vh_fp): # Get filepath of new ssl_vhost if non_ssl_vh_fp.endswith(".conf"): return non_ssl_vh_fp[:-(len(".conf"))] + self.conf("le_vhost_ext") else: return non_ssl_vh_fp + self.conf("le_vhost_ext") def _copy_create_ssl_vhost_skeleton(self, avail_fp, ssl_fp): """Copies over existing Vhost with IfModule mod_ssl.c> skeleton. :param str avail_fp: Pointer to the original available non-ssl vhost :param str ssl_fp: Full path where the new ssl_vhost will reside. A new file is created on the filesystem. 
""" # First register the creation so that it is properly removed if # configuration is rolled back self.reverter.register_file_creation(False, ssl_fp) try: with open(avail_fp, "r") as orig_file: with open(ssl_fp, "w") as new_file: new_file.write("<IfModule mod_ssl.c>\n") for line in orig_file: new_file.write(line) new_file.write("</IfModule>\n") except IOError: logger.fatal("Error writing/reading to file in make_vhost_ssl") raise errors.PluginError("Unable to write/read in make_vhost_ssl") def _update_ssl_vhosts_addrs(self, vh_path): ssl_addrs = set() ssl_addr_p = self.aug.match(vh_path + "/arg") for addr in ssl_addr_p: old_addr = obj.Addr.fromstring( str(self.parser.get_arg(addr))) ssl_addr = old_addr.get_addr_obj("443") self.aug.set(addr, str(ssl_addr)) ssl_addrs.add(ssl_addr) return ssl_addrs def _add_dummy_ssl_directives(self, vh_path): self.parser.add_dir(vh_path, "SSLCertificateFile", "insert_cert_file_path") self.parser.add_dir(vh_path, "SSLCertificateKeyFile", "insert_key_file_path") self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf) def _add_name_vhost_if_necessary(self, vhost): """Add NameVirtualHost Directives if necessary for new vhost. NameVirtualHosts was a directive in Apache < 2.4 https://httpd.apache.org/docs/2.2/mod/core.html#namevirtualhost :param vhost: New virtual host that was recently created. 
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost` """ need_to_save = False # See if the exact address appears in any other vhost # Remember 1.1.1.1:* == 1.1.1.1 -> hence any() for addr in vhost.addrs: for test_vh in self.vhosts: if (vhost.filep != test_vh.filep and any(test_addr == addr for test_addr in test_vh.addrs) and not self.is_name_vhost(addr)): self.add_name_vhost(addr) logger.info("Enabling NameVirtualHosts on %s", addr) need_to_save = True if need_to_save: self.save() ############################################################################ # Enhancements ############################################################################ def supported_enhancements(self): # pylint: disable=no-self-use """Returns currently supported enhancements.""" return ["redirect"] def enhance(self, domain, enhancement, options=None): """Enhance configuration. :param str domain: domain to enhance :param str enhancement: enhancement type defined in :const:`~letsencrypt.constants.ENHANCEMENTS` :param options: options for the enhancement See :const:`~letsencrypt.constants.ENHANCEMENTS` documentation for appropriate parameter. :raises .errors.PluginError: If Enhancement is not supported, or if there is any other problem with the enhancement. """ try: func = self._enhance_func[enhancement] except KeyError: raise errors.PluginError( "Unsupported enhancement: {0}".format(enhancement)) try: func(self.choose_vhost(domain), options) except errors.PluginError: logger.warn("Failed %s for %s", enhancement, domain) raise def _enable_redirect(self, ssl_vhost, unused_options): """Redirect all equivalent HTTP traffic to ssl_vhost. .. todo:: This enhancement should be rewritten and will unfortunately require lots of debugging by hand. Adds Redirect directive to the port 80 equivalent of ssl_vhost First the function attempts to find the vhost with equivalent ip addresses that serves on non-ssl ports The function then adds the directive .. 
note:: This function saves the configuration :param ssl_vhost: Destination of traffic, an ssl enabled vhost :type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost` :param unused_options: Not currently used :type unused_options: Not Available :returns: Success, general_vhost (HTTP vhost) :rtype: (bool, :class:`~letsencrypt_apache.obj.VirtualHost`) :raises .errors.PluginError: If no viable HTTP host can be created or used for the redirect. """ if "rewrite_module" not in self.parser.modules: self.enable_mod("rewrite") general_vh = self._get_http_vhost(ssl_vhost) if general_vh is None: # Add virtual_server with redirect logger.debug("Did not find http version of ssl virtual host " "attempting to create") redirect_addrs = self._get_proposed_addrs(ssl_vhost) for vhost in self.vhosts: if vhost.enabled and vhost.conflicts(redirect_addrs): raise errors.PluginError( "Unable to find corresponding HTTP vhost; " "Unable to create one as intended addresses conflict; " "Current configuration does not support automated " "redirection") self._create_redirect_vhost(ssl_vhost) else: # Check if redirection already exists self._verify_no_redirects(general_vh) # Add directives to server # Note: These are not immediately searchable in sites-enabled # even with save() and load() self.parser.add_dir(general_vh.path, "RewriteEngine", "on") self.parser.add_dir(general_vh.path, "RewriteRule", constants.REWRITE_HTTPS_ARGS) self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" % (general_vh.filep, ssl_vhost.filep)) self.save() logger.info("Redirecting vhost in %s to ssl vhost in %s", general_vh.filep, ssl_vhost.filep) def _verify_no_redirects(self, vhost): """Checks to see if existing redirect is in place. 
Checks to see if virtualhost already contains a rewrite or redirect returns boolean, integer :param vhost: vhost to check :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost` :raises errors.PluginError: When another redirection exists """ rewrite_path = self.parser.find_dir( "RewriteRule", None, start=vhost.path) redirect_path = self.parser.find_dir("Redirect", None, start=vhost.path) if redirect_path: # "Existing Redirect directive for virtualhost" raise errors.PluginError("Existing Redirect present on HTTP vhost.") if rewrite_path: # "No existing redirection for virtualhost" if len(rewrite_path) != len(constants.REWRITE_HTTPS_ARGS): raise errors.PluginError("Unknown Existing RewriteRule") for match, arg in itertools.izip( rewrite_path, constants.REWRITE_HTTPS_ARGS): if self.aug.get(match) != arg: raise errors.PluginError("Unknown Existing RewriteRule") raise errors.PluginError( "Let's Encrypt has already enabled redirection") def _create_redirect_vhost(self, ssl_vhost): """Creates an http_vhost specifically to redirect for the ssl_vhost. 
:param ssl_vhost: ssl vhost :type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost` :returns: tuple of the form (`success`, :class:`~letsencrypt_apache.obj.VirtualHost`) :rtype: tuple """ text = self._get_redirect_config_str(ssl_vhost) redirect_filepath = self._write_out_redirect(ssl_vhost, text) self.aug.load() # Make a new vhost data structure and add it to the lists new_vhost = self._create_vhost(parser.get_aug_path(redirect_filepath)) self.vhosts.append(new_vhost) # Finally create documentation for the change self.save_notes += ("Created a port 80 vhost, %s, for redirection to " "ssl vhost %s\n" % (new_vhost.filep, ssl_vhost.filep)) def _get_redirect_config_str(self, ssl_vhost): # get servernames and serveraliases serveralias = "" servername = "" if ssl_vhost.name is not None: servername = "ServerName " + ssl_vhost.name if ssl_vhost.aliases: serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases) return ("<VirtualHost %s>\n" "%s \n" "%s \n" "ServerSignature Off\n" "\n" "RewriteEngine On\n" "RewriteRule %s\n" "\n" "ErrorLog /var/log/apache2/redirect.error.log\n" "LogLevel warn\n" "</VirtualHost>\n" % (" ".join(str(addr) for addr in self._get_proposed_addrs(ssl_vhost)), servername, serveralias, " ".join(constants.REWRITE_HTTPS_ARGS))) def _write_out_redirect(self, ssl_vhost, text): # This is the default name redirect_filename = "le-redirect.conf" # See if a more appropriate name can be applied if ssl_vhost.name is not None: # make sure servername doesn't exceed filename length restriction if len(ssl_vhost.name) < (255 - (len(redirect_filename) + 1)): redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name redirect_filepath = os.path.join( self.parser.root, "sites-available", redirect_filename) # Register the new file that will be created # Note: always register the creation before writing to ensure file will # be removed in case of unexpected program exit self.reverter.register_file_creation(False, redirect_filepath) # Write out file with 
open(redirect_filepath, "w") as redirect_file: redirect_file.write(text) logger.info("Created redirect file: %s", redirect_filename) return redirect_filepath def _get_http_vhost(self, ssl_vhost): """Find appropriate HTTP vhost for ssl_vhost.""" # First candidate vhosts filter candidate_http_vhs = [ vhost for vhost in self.vhosts if not vhost.ssl ] # Second filter - check addresses for http_vh in candidate_http_vhs: if http_vh.same_server(ssl_vhost): return http_vh return None def _get_proposed_addrs(self, vhost, port="80"): # pylint: disable=no-self-use """Return all addrs of vhost with the port replaced with the specified. :param obj.VirtualHost ssl_vhost: Original Vhost :param str port: Desired port for new addresses :returns: `set` of :class:`~obj.Addr` """ redirects = set() for addr in vhost.addrs: redirects.add(addr.get_addr_obj(port)) return redirects def get_all_certs_keys(self): """Find all existing keys, certs from configuration. Retrieve all certs and keys set in VirtualHosts on the Apache server :returns: list of tuples with form [(cert, key, path)] cert - str path to certificate file key - str path to associated key file path - File path to configuration file. :rtype: list """ c_k = set() for vhost in self.vhosts: if vhost.ssl: cert_path = self.parser.find_dir( "SSLCertificateFile", None, start=vhost.path, exclude=False) key_path = self.parser.find_dir( "SSLCertificateKeyFile", None, start=vhost.path, exclude=False) if cert_path and key_path: cert = os.path.abspath(self.parser.get_arg(cert_path[-1])) key = os.path.abspath(self.parser.get_arg(key_path[-1])) c_k.add((cert, key, get_file_path(cert_path[-1]))) else: logger.warning( "Invalid VirtualHost configuration - %s", vhost.filep) return c_k def is_site_enabled(self, avail_fp): """Checks to see if the given site is enabled. .. 
todo:: fix hardcoded sites-enabled, check os.path.samefile :param str avail_fp: Complete file path of available site :returns: Success :rtype: bool """ enabled_dir = os.path.join(self.parser.root, "sites-enabled") for entry in os.listdir(enabled_dir): if os.path.realpath(os.path.join(enabled_dir, entry)) == avail_fp: return True return False def enable_site(self, vhost): """Enables an available site, Apache restart required. .. note:: Does not make sure that the site correctly works or that all modules are enabled appropriately. .. todo:: This function should number subdomains before the domain vhost .. todo:: Make sure link is not broken... :param vhost: vhost to enable :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost` :raises .errors.NotSupportedError: If filesystem layout is not supported. """ if self.is_site_enabled(vhost.filep): return if "/sites-available/" in vhost.filep: enabled_path = ("%s/sites-enabled/%s" % (self.parser.root, os.path.basename(vhost.filep))) self.reverter.register_file_creation(False, enabled_path) os.symlink(vhost.filep, enabled_path) vhost.enabled = True logger.info("Enabling available site: %s", vhost.filep) self.save_notes += "Enabled site %s\n" % vhost.filep else: raise errors.NotSupportedError( "Unsupported filesystem layout. " "sites-available/enabled expected.") def enable_mod(self, mod_name, temp=False): """Enables module in Apache. Both enables and restarts Apache so module is active. :param str mod_name: Name of the module to enable. (e.g. 'ssl') :param bool temp: Whether or not this is a temporary action. :raises .errors.NotSupportedError: If the filesystem layout is not supported. :raises .errors.MisconfigurationError: If a2enmod or a2dismod cannot be run. """ # Support Debian specific setup if (not os.path.isdir(os.path.join(self.parser.root, "mods-available")) or not os.path.isdir( os.path.join(self.parser.root, "mods-enabled"))): raise errors.NotSupportedError( "Unsupported directory layout. 
You may try to enable mod %s " "and try again." % mod_name) self._enable_mod_debian(mod_name, temp) self.save_notes += "Enabled %s module in Apache" % mod_name logger.debug("Enabled Apache %s module", mod_name) # Modules can enable additional config files. Variables may be defined # within these new configuration sections. # Restart is not necessary as DUMP_RUN_CFG uses latest config. self.parser.update_runtime_variables(self.conf("ctl")) self.parser.modules.add(mod_name + "_module") self.parser.modules.add("mod_" + mod_name + ".c") def _enable_mod_debian(self, mod_name, temp): """Assumes mods-available, mods-enabled layout.""" # Generate reversal command. # Try to be safe here... check that we can probably reverse before # applying enmod command if not le_util.exe_exists(self.conf("dismod")): raise errors.MisconfigurationError( "Unable to find a2dismod, please make sure a2enmod and " "a2dismod are configured correctly for letsencrypt.") self.reverter.register_undo_command( temp, [self.conf("dismod"), mod_name]) le_util.run_script([self.conf("enmod"), mod_name]) def restart(self): """Restarts apache server. .. todo:: This function will be converted to using reload :raises .errors.MisconfigurationError: If unable to restart due to a configuration problem, or if the restart subprocess cannot be run. """ return apache_restart(self.conf("init-script")) def config_test(self): # pylint: disable=no-self-use """Check the configuration of Apache for errors. :raises .errors.MisconfigurationError: If config_test fails """ try: le_util.run_script([self.conf("ctl"), "configtest"]) except errors.SubprocessError as err: raise errors.MisconfigurationError(str(err)) def get_version(self): """Return version of Apache Server. Version is returned as tuple. (ie. 
2.4.7 = (2, 4, 7)) :returns: version :rtype: tuple :raises .PluginError: if unable to find Apache version """ try: stdout, _ = le_util.run_script([self.conf("ctl"), "-v"]) except errors.SubprocessError: raise errors.PluginError( "Unable to run %s -v" % self.conf("ctl")) regex = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE) matches = regex.findall(stdout) if len(matches) != 1: raise errors.PluginError("Unable to find Apache version") return tuple([int(i) for i in matches[0].split(".")]) def more_info(self): """Human-readable string to help understand the module""" return ( "Configures Apache to authenticate and install HTTPS.{0}" "Server root: {root}{0}" "Version: {version}".format( os.linesep, root=self.parser.loc["root"], version=".".join(str(i) for i in self.version)) ) ########################################################################### # Challenges Section ########################################################################### def get_chall_pref(self, unused_domain): # pylint: disable=no-self-use """Return list of challenge preferences.""" return [challenges.DVSNI] def perform(self, achalls): """Perform the configuration related challenge. This function currently assumes all challenges will be fulfilled. If this turns out not to be the case in the future. Cleanup and outstanding challenges will have to be designed better. """ self._chall_out.update(achalls) responses = [None] * len(achalls) apache_dvsni = dvsni.ApacheDvsni(self) for i, achall in enumerate(achalls): if isinstance(achall, achallenges.DVSNI): # Currently also have dvsni hold associated index # of the challenge. This helps to put all of the responses back # together when they are all complete. apache_dvsni.add_chall(achall, i) sni_response = apache_dvsni.perform() if sni_response: # Must restart in order to activate the challenges. 
# Handled here because we may be able to load up other challenge # types self.restart() # Go through all of the challenges and assign them to the proper # place in the responses return value. All responses must be in the # same order as the original challenges. for i, resp in enumerate(sni_response): responses[apache_dvsni.indices[i]] = resp return responses def cleanup(self, achalls): """Revert all challenges.""" self._chall_out.difference_update(achalls) # If all of the challenges have been finished, clean up everything if not self._chall_out: self.revert_challenge_config() self.restart() self.parser.init_modules() def apache_restart(apache_init_script): """Restarts the Apache Server. :param str apache_init_script: Path to the Apache init script. .. todo:: Try to use reload instead. (This caused timing problems before) .. todo:: On failure, this should be a recovery_routine call with another restart. This will confuse and inhibit developers from testing code though. This change should happen after the ApacheConfigurator has been thoroughly tested. The function will need to be moved into the class again. Perhaps this version can live on... for testing purposes. :raises .errors.MisconfigurationError: If unable to restart due to a configuration problem, or if the restart subprocess cannot be run. """ try: proc = subprocess.Popen([apache_init_script, "restart"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) except (OSError, ValueError): logger.fatal( "Unable to restart the Apache process with %s", apache_init_script) raise errors.MisconfigurationError( "Unable to restart Apache process with %s" % apache_init_script) stdout, stderr = proc.communicate() if proc.returncode != 0: # Enter recovery routine... logger.error("Apache Restart Failed!\n%s\n%s", stdout, stderr) raise errors.MisconfigurationError( "Error while restarting Apache:\n%s\n%s" % (stdout, stderr)) def get_file_path(vhost_path): """Get file path from augeas_vhost_path. 
Takes in Augeas path and returns the file name :param str vhost_path: Augeas virtual host path :returns: filename of vhost :rtype: str """ # Strip off /files avail_fp = vhost_path[6:] # This can be optimized... while True: # Cast both to lowercase to be case insensitive find_if = avail_fp.lower().find("/ifmodule") if find_if != -1: avail_fp = avail_fp[:find_if] continue find_vh = avail_fp.lower().find("/virtualhost") if find_vh != -1: avail_fp = avail_fp[:find_vh] continue break return avail_fp def temp_install(options_ssl): """Temporary install for convenience.""" # WARNING: THIS IS A POTENTIAL SECURITY VULNERABILITY # THIS SHOULD BE HANDLED BY THE PACKAGE MANAGER # AND TAKEN OUT BEFORE RELEASE, INSTEAD # SHOWING A NICE ERROR MESSAGE ABOUT THE PROBLEM. # Check to make sure options-ssl.conf is installed if not os.path.isfile(options_ssl): shutil.copyfile(constants.MOD_SSL_CONF_SRC, options_ssl)
# Tests for rolling-window aggregations (mean/sum/count/median/min/max/
# std/var): results are compared against the equivalent NumPy reduction
# applied directly to the trailing window of raw data.
from datetime import datetime

import numpy as np
import pytest

import pandas.util._test_decorators as td
from pandas import (
    DataFrame,
    DatetimeIndex,
    Series,
    concat,
    isna,
    notna,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets


@pytest.mark.parametrize(
    "compare_func, roll_func, kwargs",
    [
        [np.mean, "mean", {}],
        [np.nansum, "sum", {}],
        pytest.param(
            lambda x: np.isfinite(x).astype(float).sum(),
            "count",
            {},
            marks=pytest.mark.filterwarnings("ignore:min_periods:FutureWarning"),
        ),
        [np.median, "median", {}],
        [np.min, "min", {}],
        [np.max, "max", {}],
        [lambda x: np.std(x, ddof=1), "std", {}],
        [lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
        [lambda x: np.var(x, ddof=1), "var", {}],
        [lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
    ],
)
def test_series(series, compare_func, roll_func, kwargs):
    # Last value of a window-50 rolling op equals the NumPy reduction of
    # the final 50 observations.
    result = getattr(series.rolling(50), roll_func)(**kwargs)
    assert isinstance(result, Series)
    tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))


@pytest.mark.parametrize(
    "compare_func, roll_func, kwargs",
    [
        [np.mean, "mean", {}],
        [np.nansum, "sum", {}],
        pytest.param(
            lambda x: np.isfinite(x).astype(float).sum(),
            "count",
            {},
            marks=pytest.mark.filterwarnings("ignore:min_periods:FutureWarning"),
        ),
        [np.median, "median", {}],
        [np.min, "min", {}],
        [np.max, "max", {}],
        [lambda x: np.std(x, ddof=1), "std", {}],
        [lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
        [lambda x: np.var(x, ddof=1), "var", {}],
        [lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
    ],
)
def test_frame(raw, frame, compare_func, roll_func, kwargs):
    # Same as test_series, but column-wise over a DataFrame.
    result = getattr(frame.rolling(50), roll_func)(**kwargs)
    assert isinstance(result, DataFrame)
    tm.assert_series_equal(
        result.iloc[-1, :],
        frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
        check_names=False,
    )


@pytest.mark.parametrize(
    "compare_func, roll_func, kwargs, minp",
    [
        [np.mean, "mean", {}, 10],
        [np.nansum, "sum", {}, 10],
        [lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
        [np.median, "median", {}, 10],
        [np.min, "min", {}, 10],
        [np.max, "max", {}, 10],
        [lambda x: np.std(x, ddof=1), "std", {}, 10],
        [lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
        [lambda x: np.var(x, ddof=1), "var", {}, 10],
        [lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
    ],
)
def test_time_rule_series(series, compare_func, roll_func, kwargs, minp):
    # Rolling over a business-day-resampled series: the last window value
    # equals the reduction over the trailing 25-business-day slice.
    win = 25
    ser = series[::2].resample("B").mean()
    series_result = getattr(ser.rolling(window=win, min_periods=minp), roll_func)(
        **kwargs
    )
    last_date = series_result.index[-1]
    prev_date = last_date - 24 * offsets.BDay()

    trunc_series = series[::2].truncate(prev_date, last_date)
    tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))


@pytest.mark.parametrize(
    "compare_func, roll_func, kwargs, minp",
    [
        [np.mean, "mean", {}, 10],
        [np.nansum, "sum", {}, 10],
        [lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
        [np.median, "median", {}, 10],
        [np.min, "min", {}, 10],
        [np.max, "max", {}, 10],
        [lambda x: np.std(x, ddof=1), "std", {}, 10],
        [lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
        [lambda x: np.var(x, ddof=1), "var", {}, 10],
        [lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
    ],
)
def test_time_rule_frame(raw, frame, compare_func, roll_func, kwargs, minp):
    # DataFrame analogue of test_time_rule_series.
    win = 25
    frm = frame[::2].resample("B").mean()
    frame_result = getattr(frm.rolling(window=win, min_periods=minp), roll_func)(
        **kwargs
    )
    last_date = frame_result.index[-1]
    prev_date = last_date - 24 * offsets.BDay()

    trunc_frame = frame[::2].truncate(prev_date, last_date)
    tm.assert_series_equal(
        frame_result.xs(last_date),
        trunc_frame.apply(compare_func, raw=raw),
        check_names=False,
    )


@pytest.mark.parametrize(
    "compare_func, roll_func, kwargs",
    [
        [np.mean, "mean", {}],
        [np.nansum, "sum", {}],
        [np.median, "median", {}],
        [np.min, "min", {}],
        [np.max, "max", {}],
        [lambda x: np.std(x, ddof=1), "std", {}],
        [lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
        [lambda x: np.var(x, ddof=1), "var", {}],
        [lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
    ],
)
def test_nans(compare_func, roll_func, kwargs):
    # NaN padding at both ends: the full-window result should match the
    # reduction over the non-NaN middle, and min_periods should control
    # exactly which positions become NaN.
    obj = Series(np.random.randn(50))
    obj[:10] = np.NaN
    obj[-10:] = np.NaN

    result = getattr(obj.rolling(50, min_periods=30), roll_func)(**kwargs)
    tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))

    # min_periods is working correctly
    result = getattr(obj.rolling(20, min_periods=15), roll_func)(**kwargs)
    assert isna(result.iloc[23])
    assert not isna(result.iloc[24])

    assert not isna(result.iloc[-6])
    assert isna(result.iloc[-5])

    obj2 = Series(np.random.randn(20))
    result = getattr(obj2.rolling(10, min_periods=5), roll_func)(**kwargs)
    assert isna(result.iloc[3])
    assert notna(result.iloc[4])

    if roll_func != "sum":
        # min_periods=0 and min_periods=1 are equivalent for non-sum ops.
        result0 = getattr(obj.rolling(20, min_periods=0), roll_func)(**kwargs)
        result1 = getattr(obj.rolling(20, min_periods=1), roll_func)(**kwargs)
        tm.assert_almost_equal(result0, result1)


def test_nans_count():
    # count() with NaN padding: last window counts only finite values.
    obj = Series(np.random.randn(50))
    obj[:10] = np.NaN
    obj[-10:] = np.NaN
    result = obj.rolling(50, min_periods=30).count()
    tm.assert_almost_equal(
        result.iloc[-1], np.isfinite(obj[10:-10]).astype(float).sum()
    )


@pytest.mark.parametrize(
    "roll_func, kwargs",
    [
        ["mean", {}],
        ["sum", {}],
        ["median", {}],
        ["min", {}],
        ["max", {}],
        ["std", {}],
        ["std", {"ddof": 0}],
        ["var", {}],
        ["var", {"ddof": 0}],
    ],
)
@pytest.mark.parametrize("minp", [0, 99, 100])
def test_min_periods(series, minp, roll_func, kwargs):
    # A window of len(series)+1 vs len(series) must agree wherever both
    # are non-NaN, and their NaN masks must be identical.
    result = getattr(series.rolling(len(series) + 1, min_periods=minp), roll_func)(
        **kwargs
    )
    expected = getattr(series.rolling(len(series), min_periods=minp), roll_func)(
        **kwargs
    )
    nan_mask = isna(result)
    tm.assert_series_equal(nan_mask, isna(expected))

    nan_mask = ~nan_mask
    tm.assert_almost_equal(result[nan_mask], expected[nan_mask])


def test_min_periods_count(series):
    # Same oversized-window check for count().
    result = series.rolling(len(series) + 1, min_periods=0).count()
    expected = series.rolling(len(series), min_periods=0).count()
    nan_mask = isna(result)
    tm.assert_series_equal(nan_mask, isna(expected))

    nan_mask = ~nan_mask
    tm.assert_almost_equal(result[nan_mask], expected[nan_mask])


@pytest.mark.parametrize(
    "roll_func, kwargs, minp",
    [
        ["mean", {}, 15],
        ["sum", {}, 15],
        ["count", {}, 0],
        ["median", {}, 15],
        ["min", {}, 15],
        ["max", {}, 15],
        ["std", {}, 15],
        ["std", {"ddof": 0}, 15],
        ["var", {}, 15],
        ["var", {"ddof": 0}, 15],
    ],
)
def test_center(roll_func, kwargs, minp):
    # center=True is equivalent to padding the tail with NaN and shifting
    # the non-centered result back by half the window.
    obj = Series(np.random.randn(50))
    obj[:10] = np.NaN
    obj[-10:] = np.NaN

    result = getattr(obj.rolling(20, min_periods=minp, center=True), roll_func)(
        **kwargs
    )
    expected = getattr(
        concat([obj, Series([np.NaN] * 9)]).rolling(20, min_periods=minp), roll_func
    )(**kwargs)[9:].reset_index(drop=True)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(
    "roll_func, kwargs, minp, fill_value",
    [
        ["mean", {}, 10, None],
        ["sum", {}, 10, None],
        ["count", {}, 0, 0],
        ["median", {}, 10, None],
        ["min", {}, 10, None],
        ["max", {}, 10, None],
        ["std", {}, 10, None],
        ["std", {"ddof": 0}, 10, None],
        ["var", {}, 10, None],
        ["var", {"ddof": 0}, 10, None],
    ],
)
def test_center_reindex_series(series, roll_func, kwargs, minp, fill_value):
    # shifter index
    s = [f"x{x:d}" for x in range(12)]

    series_xp = (
        getattr(
            series.reindex(list(series.index) + s).rolling(window=25, min_periods=minp),
            roll_func,
        )(**kwargs)
        .shift(-12)
        .reindex(series.index)
    )
    series_rs = getattr(
        series.rolling(window=25, min_periods=minp, center=True), roll_func
    )(**kwargs)
    if fill_value is not None:
        series_xp = series_xp.fillna(fill_value)
    tm.assert_series_equal(series_xp, series_rs)


@pytest.mark.parametrize(
    "roll_func, kwargs, minp, fill_value",
    [
        ["mean", {}, 10, None],
        ["sum", {}, 10, None],
        ["count", {}, 0, 0],
        ["median", {}, 10, None],
        ["min", {}, 10, None],
        ["max", {}, 10, None],
        ["std", {}, 10, None],
        ["std", {"ddof": 0}, 10, None],
        ["var", {}, 10, None],
        ["var", {"ddof": 0}, 10, None],
    ],
)
def test_center_reindex_frame(frame, roll_func, kwargs, minp, fill_value):
    # shifter index
    s = [f"x{x:d}" for x in range(12)]

    frame_xp = (
        getattr(
            frame.reindex(list(frame.index) + s).rolling(window=25, min_periods=minp),
            roll_func,
        )(**kwargs)
        .shift(-12)
        .reindex(frame.index)
    )
    frame_rs = getattr(
        frame.rolling(window=25, min_periods=minp, center=True), roll_func
    )(**kwargs)
    if fill_value is not None:
        frame_xp = frame_xp.fillna(fill_value)
    tm.assert_frame_equal(frame_xp, frame_rs)


@pytest.mark.parametrize(
    "f",
    [
        lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
        lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
        lambda x: x.rolling(window=10, min_periods=5).max(),
        lambda x: x.rolling(window=10, min_periods=5).min(),
        lambda x: x.rolling(window=10, min_periods=5).sum(),
        lambda x: x.rolling(window=10, min_periods=5).mean(),
        lambda x: x.rolling(window=10, min_periods=5).std(),
        lambda x: x.rolling(window=10, min_periods=5).var(),
        lambda x: x.rolling(window=10, min_periods=5).skew(),
        lambda x: x.rolling(window=10, min_periods=5).kurt(),
        lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
        lambda x: x.rolling(window=10, min_periods=5).median(),
        lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
        lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
        pytest.param(
            lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
            marks=td.skip_if_no_scipy,
        ),
    ],
)
def test_rolling_functions_window_non_shrinkage(f):
    # GH 7764: input shorter than min_periods yields all-NaN, same shape.
    s = Series(range(4))
    s_expected = Series(np.nan, index=s.index)
    df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
    df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)

    s_result = f(s)
    tm.assert_series_equal(s_result, s_expected)

    df_result = f(df)
    tm.assert_frame_equal(df_result, df_expected)


def test_rolling_max_gh6297():
    """Replicate result expected in GH #6297"""
    indices = [datetime(1975, 1, i) for i in range(1, 6)]
    # So that we can have 2 datapoints on one of the days
    indices.append(datetime(1975, 1, 3, 6, 0))

    series = Series(range(1, 7), index=indices)
    # Use floats instead of ints as values
    series = series.map(lambda x: float(x))
    # Sort chronologically
    series = series.sort_index()

    expected = Series(
        [1.0, 2.0, 6.0, 4.0, 5.0],
        index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
    )
    x = series.resample("D").max().rolling(window=1).max()
    tm.assert_series_equal(expected, x)


def test_rolling_max_resample():
    # Daily-resampled max/median/mean followed by rolling(1).max().
    indices = [datetime(1975, 1, i) for i in range(1, 6)]
    # So that we can have 3 datapoints on last day (4, 10, and 20)
    indices.append(datetime(1975, 1, 5, 1))
    indices.append(datetime(1975, 1, 5, 2))

    series = Series(list(range(0, 5)) + [10, 20], index=indices)
    # Use floats instead of ints as values
    series = series.map(lambda x: float(x))
    # Sort chronologically
    series = series.sort_index()

    # Default how should be max
    expected = Series(
        [0.0, 1.0, 2.0, 3.0, 20.0],
        index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
    )
    x = series.resample("D").max().rolling(window=1).max()
    tm.assert_series_equal(expected, x)

    # Now specify median (10.0)
    expected = Series(
        [0.0, 1.0, 2.0, 3.0, 10.0],
        index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
    )
    x = series.resample("D").median().rolling(window=1).max()
    tm.assert_series_equal(expected, x)

    # Now specify mean (4+10+20)/3
    v = (4.0 + 10.0 + 20.0) / 3.0
    expected = Series(
        [0.0, 1.0, 2.0, 3.0, v],
        index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
    )
    x = series.resample("D").mean().rolling(window=1).max()
    tm.assert_series_equal(expected, x)


def test_rolling_min_resample():
    indices = [datetime(1975, 1, i) for i in range(1, 6)]
    # So that we can have 3 datapoints on last day (4, 10, and 20)
    indices.append(datetime(1975, 1, 5, 1))
    indices.append(datetime(1975, 1, 5, 2))

    series = Series(list(range(0, 5)) + [10, 20], index=indices)
    # Use floats instead of ints as values
    series = series.map(lambda x: float(x))
    # Sort chronologically
    series = series.sort_index()

    # Default how should be min
    expected = Series(
        [0.0, 1.0, 2.0, 3.0, 4.0],
        index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
    )
    r = series.resample("D").min().rolling(window=1)
    tm.assert_series_equal(expected, r.min())


def test_rolling_median_resample():
    indices = [datetime(1975, 1, i) for i in range(1, 6)]
    # So that we can have 3 datapoints on last day (4, 10, and 20)
    indices.append(datetime(1975, 1, 5, 1))
    indices.append(datetime(1975, 1, 5, 2))

    series = Series(list(range(0, 5)) + [10, 20], index=indices)
    # Use floats instead of ints as values
    series = series.map(lambda x: float(x))
    # Sort chronologically
    series = series.sort_index()

    # Default how should be median
    expected = Series(
        [0.0, 1.0, 2.0, 3.0, 10],
        index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
    )
    x = series.resample("D").median().rolling(window=1).median()
    tm.assert_series_equal(expected, x)


def test_rolling_median_memory_error():
    # GH11722: large rolling median must not exhaust memory.
    n = 20000
    Series(np.random.randn(n)).rolling(window=2, center=False).median()
    Series(np.random.randn(n)).rolling(window=2, center=False).median()


@pytest.mark.parametrize(
    "data_type",
    [np.dtype(f"f{width}") for width in [4, 8]]
    + [np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"],
)
def test_rolling_min_max_numeric_types(data_type):
    # GH12373

    # Just testing that these don't throw exceptions and that
    # the return type is float64. Other tests will cover quantitative
    # correctness
    result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max()
    assert result.dtypes[0] == np.dtype("f8")
    result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min()
    assert result.dtypes[0] == np.dtype("f8")


@pytest.mark.parametrize(
    "f",
    [
        lambda x: x.rolling(window=10, min_periods=0).count(),
        lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
        lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
        lambda x: x.rolling(window=10, min_periods=5).max(),
        lambda x: x.rolling(window=10, min_periods=5).min(),
        lambda x: x.rolling(window=10, min_periods=5).sum(),
        lambda x: x.rolling(window=10, min_periods=5).mean(),
        lambda x: x.rolling(window=10, min_periods=5).std(),
        lambda x: x.rolling(window=10, min_periods=5).var(),
        lambda x: x.rolling(window=10, min_periods=5).skew(),
        lambda x: x.rolling(window=10, min_periods=5).kurt(),
        lambda x: x.rolling(window=10, min_periods=5).quantile(0.5),
        lambda x: x.rolling(window=10, min_periods=5).median(),
        lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
        lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
        pytest.param(
            lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
            marks=td.skip_if_no_scipy,
        ),
    ],
)
def test_moment_functions_zero_length(f):
    # GH 8056: zero-length inputs round-trip unchanged.
    s = Series(dtype=np.float64)
    s_expected = s
    df1 = DataFrame()
    df1_expected = df1
    df2 = DataFrame(columns=["a"])
    df2["a"] = df2["a"].astype("float64")
    df2_expected = df2

    s_result = f(s)
    tm.assert_series_equal(s_result, s_expected)

    df1_result = f(df1)
    tm.assert_frame_equal(df1_result, df1_expected)

    df2_result = f(df2)
    tm.assert_frame_equal(df2_result, df2_expected)
# -*- coding: utf-8 -*-

import os
import sys

try:
    from gluon import current
except ImportError:
    # Written via sys.stderr.write (not the Py2-only "print >>" statement)
    # so this module at least parses under both Python 2 and 3.
    sys.stderr.write("""
The installed version of Web2py is too old -- it does not define current.
Please upgrade Web2py to a more recent version.
""")

# Version of 000_config.py
# Increment this if the user should update their running instance
VERSION = 1

#def update_check(environment, template="default"):
def update_check(settings):
    """
        Check whether the dependencies are sufficient to run Eden

        @ToDo: Load deployment_settings so that we can configure the update_check
               - need to rework so that 000_config.py is parsed 1st

        @param settings: the deployment_settings

        @returns: dict with keys "error_messages" (fatal) and
                  "warning_messages" (non-fatal)
    """

    # Get Web2py environment into our globals.
    #globals().update(**environment)

    request = current.request

    # Fatal errors
    errors = []
    # Non-fatal warnings
    warnings = []

    # -------------------------------------------------------------------------
    # Check Python libraries

    # Get mandatory global dependencies
    app_path = request.folder

    gr_path = os.path.join(app_path, "requirements.txt")
    or_path = os.path.join(app_path, "optional_requirements.txt")

    global_dep = parse_requirements({}, gr_path)
    optional_dep = parse_requirements({}, or_path)

    templates = settings.get_template()
    location = settings.get_template_location()
    if not isinstance(templates, (tuple, list)):
        templates = (templates,)
    template_dep = {}
    template_optional_dep = {}
    for template in templates:
        tr_path = os.path.join(app_path, location, "templates", template,
                               "requirements.txt")
        tor_path = os.path.join(app_path, location, "templates", template,
                                "optional_requirements.txt")
        parse_requirements(template_dep, tr_path)
        parse_requirements(template_optional_dep, tor_path)

    # Remove optional dependencies which are already accounted for in
    # template dependencies
    # NB iterate over a snapshot (list) since we delete during iteration
    unique = set(optional_dep.keys()).difference(set(template_dep.keys()))
    for dependency in list(optional_dep.keys()):
        if dependency not in unique:
            del optional_dep[dependency]

    # Override optional dependency messages from template
    unique = set(optional_dep.keys()).difference(set(template_optional_dep.keys()))
    for dependency in list(optional_dep.keys()):
        if dependency not in unique:
            del optional_dep[dependency]

    errors, warnings = s3_check_python_lib(global_dep,
                                           template_dep,
                                           template_optional_dep,
                                           optional_dep)
    # @ToDo: Move these to Template
    # for now this is done in s3db.climate_first_run()
    if settings.has_module("climate"):
        if settings.get_database_type() != "postgres":
            errors.append("Climate unresolved dependency: PostgreSQL required")
        try:
            import rpy2
        except ImportError:
            errors.append("Climate unresolved dependency: RPy2 required")
        try:
            from Scientific.IO import NetCDF
        except ImportError:
            warnings.append("Climate unresolved dependency: NetCDF required if you want to import readings")
        try:
            from scipy import stats
        except ImportError:
            warnings.append("Climate unresolved dependency: SciPy required if you want to generate graphs on the map")

    # -------------------------------------------------------------------------
    # Check Web2Py version
    #
    # Currently, the minimum usable Web2py is determined by whether the
    # Scheduler is available
    web2py_minimum_version = "Version 2.4.7-stable+timestamp.2013.05.27.11.49.44"
    # Offset of datetime in return value of parse_version.
    datetime_index = 4
    web2py_version_ok = True
    try:
        from gluon.fileutils import parse_version
    except ImportError:
        web2py_version_ok = False
    if web2py_version_ok:
        try:
            web2py_minimum_parsed = parse_version(web2py_minimum_version)
            web2py_minimum_datetime = web2py_minimum_parsed[datetime_index]
            web2py_installed_version = request.global_settings.web2py_version
            if isinstance(web2py_installed_version, str):
                # Post 2.4.2, request.global_settings.web2py_version is unparsed
                web2py_installed_parsed = parse_version(web2py_installed_version)
                web2py_installed_datetime = web2py_installed_parsed[datetime_index]
            else:
                # 2.4.2 & earlier style
                web2py_installed_datetime = web2py_installed_version[datetime_index]
            web2py_version_ok = web2py_installed_datetime >= web2py_minimum_datetime
        except:
            # Will get AttributeError if Web2py's parse_version is too old for
            # its current version format, which changed in 2.3.2.
            web2py_version_ok = False
    if not web2py_version_ok:
        warnings.append(
            "The installed version of Web2py is too old to support the current version of Sahana Eden."
            "\nPlease upgrade Web2py to at least version: %s" % \
            web2py_minimum_version)

    # -------------------------------------------------------------------------
    # Create required directories if needed
    databases_dir = os.path.join(app_path, "databases")
    try:
        os.stat(databases_dir)
    except OSError:
        # not found, create it
        os.mkdir(databases_dir)

    # -------------------------------------------------------------------------
    # Copy in Templates
    # - 000_config.py (machine-specific settings)
    # - rest are run in-place
    #
    template_folder = os.path.join(app_path, "modules", "templates")

    template_files = {
        # source : destination
        "000_config.py" : os.path.join("models", "000_config.py"),
    }

    copied_from_template = []

    for t in template_files:
        src_path = os.path.join(template_folder, t)
        dst_path = os.path.join(app_path, template_files[t])
        try:
            os.stat(dst_path)
        except OSError:
            # Not found, copy from template
            if t == "000_config.py":
                # src_file/dst_file: renamed from input/output which
                # shadowed builtins
                src_file = open(src_path)
                dst_file = open(dst_path, "w")
                for line in src_file:
                    if "akeytochange" in line:
                        # Generate a random hmac_key to secure the passwords in case
                        # the database is compromised
                        import uuid
                        hmac_key = uuid.uuid4()
                        # Fix: keep the trailing newline, otherwise the
                        # replacement line gets fused with the next line
                        # of the template
                        line = 'settings.auth.hmac_key = "%s"\n' % hmac_key
                    dst_file.write(line)
                dst_file.close()
                src_file.close()
            else:
                import shutil
                shutil.copy(src_path, dst_path)
            copied_from_template.append(template_files[t])

            # @ToDo: WebSetup
            # http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/WebSetup
            #if not os.path.exists("%s/applications/websetup" % os.getcwd()):
            #    # @ToDo: Check Permissions
            #    # Copy files into this folder (@ToDo: Pythonise)
            #    cp -r private/websetup "%s/applications" % os.getcwd()
            # Launch WebSetup
            #redirect(URL(a="websetup", c="default", f="index",
            #             vars=dict(appname=request.application,
            #                       firstTime="True")))
        else:
            # Found the file in the destination
            # Check if it has been edited
            import re
            edited_pattern = r"FINISHED_EDITING_\w*\s*=\s*(True|False)"
            edited_matcher = re.compile(edited_pattern).match
            has_edited = False
            with open(dst_path) as f:
                for line in f:
                    edited_result = edited_matcher(line)
                    if edited_result:
                        has_edited = True
                        edited = edited_result.group(1)
                        break
            if has_edited and (edited != "True"):
                errors.append("Please edit %s before starting the system." % t)
            # Check if it's up to date (i.e. a critical update requirement)
            version_pattern = r"VERSION =\s*([0-9]+)"
            version_matcher = re.compile(version_pattern).match
            has_version = False
            with open(dst_path) as f:
                for line in f:
                    version_result = version_matcher(line)
                    if version_result:
                        has_version = True
                        version = version_result.group(1)
                        break
            if not has_version:
                error = "Your %s is using settings from the old templates system. Please switch to the new templates system: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Templates" % t
                errors.append(error)
            elif int(version) != VERSION:
                error = "Your %s is using settings from template version %s. Please update with new settings from template version %s before starting the system." % \
                        (t, version, VERSION)
                errors.append(error)

    if copied_from_template:
        errors.append(
            "The following files were copied from templates and should be edited: %s" %
            ", ".join(copied_from_template))

    return {"error_messages": errors,
            "warning_messages": warnings,
            }

# -------------------------------------------------------------------------
def parse_requirements(output, filepath):
    """
        Parse a requirements.txt-style file into a dict

        Comment lines starting "# Warning:" or "# Error:" set a custom
        message which is attached to the *next* dependency line; a
        dependency line may name its import module after a "#".

        @param output: dict to accumulate {module_name: message} into
                       (modified in place)
        @param filepath: path of the requirements file; silently ignored
                         if missing (no override for the template)

        @returns: the output dict
    """
    try:
        with open(filepath) as filehandle:
            dependencies = filehandle.read().splitlines()
            msg = ""
            for dependency in dependencies:
                if not dependency:
                    # Skip blank lines (previously raised IndexError below)
                    continue
                if dependency[0] == "#":
                    # either a normal comment or custom message
                    # Fix: compare the 8-char prefix, not the single
                    # character dependency[7], which could never equal
                    # "# Error:" and so silently dropped Error messages
                    if dependency[:9] == "# Warning" or \
                       dependency[:8] == "# Error:":
                        msg = dependency.split(":", 1)[1]
                else:
                    import re
                    # Check if the module name is different from the package name
                    if "#" in dependency:
                        dep = dependency.split("#", 1)[1]
                        output[dep] = msg
                    else:
                        pattern = re.compile(r'([A-Za-z0-9_-]+)')
                        try:
                            dep = pattern.match(dependency).group(1)
                            output[dep] = msg
                        except AttributeError:
                            # Invalid dependency syntax
                            pass
                    msg = ""
    except IOError:
        # No override for Template
        pass
    return output

# -------------------------------------------------------------------------
def s3_check_python_lib(global_mandatory, template_mandatory,
                        template_optional, global_optional):
    """
        Checks for optional as well as mandatory python libraries

        @param global_mandatory: {dependency: custom error message}
        @param template_mandatory: {dependency: custom error message}
        @param template_optional: {dependency: custom warning message}
        @param global_optional: {dependency: custom warning message}

        @returns: tuple (errors, warnings) of message lists
    """

    errors = []
    warnings = []

    def _check(dependencies, messages, default):
        # Try to import each dependency; on failure record its custom
        # message, or the default message with the dependency name.
        # exec(...) call form works on both Python 2 and 3.
        for dependency, msg in list(dependencies.items()):
            try:
                if "from" in dependency:
                    exec(dependency)
                else:
                    exec("import %s" % dependency)
            except ImportError:
                messages.append(msg if msg else default % dependency)

    _check(global_mandatory, errors,
           "S3 unresolved dependency: %s required for Sahana to run")
    _check(template_mandatory, errors,
           "Unresolved template dependency: %s required")
    _check(template_optional, warnings,
           "Unresolved optional dependency: %s required")
    _check(global_optional, warnings,
           "Unresolved optional dependency: %s required")

    return errors, warnings

# END =========================================================================
#    Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import logging
import os
import re

from wa.framework import pluginloader
from wa.framework.plugin import Plugin
from wa.framework.exception import ResourceError
from wa.framework.configuration import settings
from wa.utils import log
from wa.utils.android import get_cacheable_apk_info
from wa.utils.misc import get_object_name
from wa.utils.types import enum, list_or_string, prioritylist, version_tuple


# NOTE(review): 'perferred' looks like a typo for 'preferred', but enum
# member names are part of the public interface — confirm before renaming.
SourcePriority = enum(['package', 'remote', 'lan', 'local',
                       'perferred'], start=0, step=10)


class __NullOwner(object):
    """Represents an owner for a resource not owned by anyone."""

    name = 'noone'
    dependencies_directory = settings.dependencies_directory

    def __getattr__(self, name):
        # Any attribute access on the null owner resolves to None.
        return None

    def __str__(self):
        return 'no-one'

    __repr__ = __str__


NO_ONE = __NullOwner()


class Resource(object):
    """
    Represents a resource that needs to be resolved. This can be pretty much
    anything: a file, environment variable, a Python object, etc. The only
    thing a resource *has* to have is an owner (which would normally be the
    Workload/Instrument/Device/etc object that needs the resource). In
    addition, a resource have any number of attributes to identify, but all
    of them are resource type specific.

    """

    # Subclasses set this to a short string identifying the resource type.
    kind = None

    def __init__(self, owner=NO_ONE):
        self.owner = owner

    def match(self, path):
        # Default matching delegates to path matching; subclasses may
        # override to add further criteria (see ApkFile.match).
        return self.match_path(path)

    def match_path(self, path):
        raise NotImplementedError()

    def __str__(self):
        return '<{}\'s {}>'.format(self.owner, self.kind)


class File(Resource):
    """A resource identified by an exact file path."""

    kind = 'file'

    def __init__(self, owner, path):
        super(File, self).__init__(owner)
        self.path = path

    def match_path(self, path):
        return self.path == path

    def __str__(self):
        return '<{}\'s {} {} file>'.format(self.owner, self.kind, self.path)


class Executable(Resource):
    """A binary for a particular ABI, identified by file name."""

    kind = 'executable'

    def __init__(self, owner, abi, filename):
        super(Executable, self).__init__(owner)
        self.abi = abi
        self.filename = filename

    def match_path(self, path):
        # Only the base name is compared; the ABI is handled by getters.
        return self.filename == os.path.basename(path)

    def __str__(self):
        return '<{}\'s {} {} executable>'.format(self.owner, self.abi, self.filename)


class ReventFile(Resource):
    """A revent recording for a given workload stage (and optional target)."""

    kind = 'revent'

    def __init__(self, owner, stage, target):
        super(ReventFile, self).__init__(owner)
        self.stage = stage
        self.target = target

    def match_path(self, path):
        # File names are either "<target>.<stage>[.*]" or "<stage>[.*]".
        filename = os.path.basename(path)
        parts = filename.split('.')
        if len(parts) > 2:
            target, stage = parts[:2]
            return target == self.target and stage == self.stage
        else:
            stage = parts[0]
            return stage == self.stage


class JarFile(Resource):
    kind = 'jar'

    def match_path(self, path):
        # An owner always has at most one jar file, so
        # always match
        return True


class ApkFile(Resource):
    """
    An APK matched on any combination of variant name, version (exact or
    range), package name, ABI support, and whether it is a uiautomator
    test APK.
    """

    kind = 'apk'

    def __init__(self, owner,
                 variant=None,
                 version=None,
                 package=None,
                 uiauto=False,
                 exact_abi=False,
                 supported_abi=None,
                 min_version=None,
                 max_version=None):
        super(ApkFile, self).__init__(owner)
        self.variant = variant
        self.version = version
        self.max_version = max_version
        self.min_version = min_version
        self.package = package
        self.uiauto = uiauto
        self.exact_abi = exact_abi
        self.supported_abi = supported_abi

    def match_path(self, path):
        ext = os.path.splitext(path)[1].lower()
        return ext == '.apk'

    def match(self, path):
        # Each criterion defaults to True and is only narrowed when the
        # corresponding attribute was specified.
        name_matches = True
        version_matches = True
        version_range_matches = True
        package_matches = True
        abi_matches = True
        uiauto_matches = uiauto_test_matches(path, self.uiauto)
        if self.version:
            version_matches = apk_version_matches(path, self.version)
        if self.max_version or self.min_version:
            version_range_matches = apk_version_matches_range(path,
                                                              self.min_version,
                                                              self.max_version)
        if self.variant:
            name_matches = file_name_matches(path, self.variant)
        if self.package:
            package_matches = package_name_matches(path, self.package)
        if self.supported_abi:
            abi_matches = apk_abi_matches(path, self.supported_abi,
                                          self.exact_abi)
        return name_matches and version_matches and \
            version_range_matches and uiauto_matches \
            and package_matches and abi_matches

    def __str__(self):
        text = '<{}\'s apk'.format(self.owner)
        if self.variant:
            text += ' {}'.format(self.variant)
        if self.version:
            text += ' {}'.format(self.version)
        if self.uiauto:
            text += 'uiautomator test'
        text += '>'
        return text


class ResourceGetter(Plugin):
    """
    Base class for implementing resolvers. Defines resolver
    interface. Resolvers are responsible for discovering resources (such as
    particular kinds of files) they know about based on the parameters that
    are passed to them. Each resolver also has a dict of attributes that
    describe it's operation, and may be used to determine which get invoked.
    There is no pre-defined set of attributes and resolvers may define their
    own.

    Class attributes:

    :name: Name that uniquely identifies this getter. Must be set by any
           concrete subclass.
    :priority: Priority with which this getter will be invoked. This should
               be one of the standard priorities specified in
               ``GetterPriority`` enumeration. If not set, this will default
               to ``GetterPriority.environment``.

    """

    name = None
    kind = 'resource_getter'

    def register(self, resolver):
        # Concrete getters register one or more source callables on the
        # resolver here.
        raise NotImplementedError()

    def initialize(self):
        pass

    def __str__(self):
        return '<ResourceGetter {}>'.format(self.name)


class ResourceResolver(object):
    """
    Discovers and registers getters, and then handles requests for
    resources using registered getters.

    """

    def __init__(self, loader=pluginloader):
        self.loader = loader
        self.logger = logging.getLogger('resolver')
        self.getters = []
        # Sources are tried in priority order (see SourcePriority).
        self.sources = prioritylist()

    def load(self):
        """Instantiate, initialize, and register all known getter plugins."""
        for gettercls in self.loader.list_plugins('resource_getter'):
            self.logger.debug('Loading getter {}'.format(gettercls.name))
            getter = self.loader.get_plugin(name=gettercls.name,
                                            kind="resource_getter")
            with log.indentcontext():
                getter.initialize()
                getter.register(self)
            self.getters.append(getter)

    def register(self, source, priority=SourcePriority.local):
        msg = 'Registering "{}" with priority "{}"'
        self.logger.debug(msg.format(get_object_name(source), priority))
        self.sources.add(source, priority)

    def get(self, resource, strict=True):
        """
        Uses registered getters to attempt to discover a resource of the
        specified kind and matching the specified criteria. Returns path to
        the resource that has been discovered. If a resource has not been
        discovered, this will raise a ``ResourceError`` or, if ``strict``
        has been set to ``False``, will return ``None``.

        """
        self.logger.debug('Resolving {}'.format(resource))
        for source in self.sources:
            source_name = get_object_name(source)
            self.logger.debug('Trying {}'.format(source_name))
            result = source(resource)
            if result is not None:
                msg = 'Resource {} found using {}:'
                self.logger.debug(msg.format(resource, source_name))
                self.logger.debug('\t{}'.format(result))
                return result
        if strict:
            raise ResourceError('{} could not be found'.format(resource))
        self.logger.debug('Resource {} not found.'.format(resource))
        return None


def apk_version_matches(path, version):
    """True if the APK's version name or code matches any requested version,
    allowing a less-specific requested version to match loosely."""
    version = list_or_string(version)
    info = get_cacheable_apk_info(path)
    for v in version:
        if v in (info.version_name, info.version_code):
            return True
        if loose_version_matching(v, info.version_name):
            return True
    return False


def apk_version_matches_range(path, min_version=None, max_version=None):
    info = get_cacheable_apk_info(path)
    return range_version_matching(info.version_name, min_version, max_version)


def range_version_matching(apk_version, min_version=None, max_version=None):
    # Both bounds are inclusive; an APK with no version never matches a range.
    if not apk_version:
        return False
    apk_version = version_tuple(apk_version)

    if max_version:
        max_version = version_tuple(max_version)
        if apk_version > max_version:
            return False
    if min_version:
        min_version = version_tuple(min_version)
        if apk_version < min_version:
            return False
    return True


def loose_version_matching(config_version, apk_version):
    config_version = version_tuple(config_version)
    apk_version = version_tuple(apk_version)

    if len(apk_version) < len(config_version):
        return False  # More specific version requested than available

    for i in range(len(config_version)):
        if config_version[i] != apk_version[i]:
            return False
    return True


def file_name_matches(path, pattern):
    # The pattern may be a literal substring or a regular expression.
    filename = os.path.basename(path)
    if pattern in filename:
        return True
    if re.search(pattern, filename):
        return True
    return False


def uiauto_test_matches(path, uiauto):
    # uiautomator test APKs are identified by their package name prefix.
    info = get_cacheable_apk_info(path)
    return uiauto == ('com.arm.wa.uiauto' in info.package)


def package_name_matches(path, package):
    info = get_cacheable_apk_info(path)
    return info.package == package


def apk_abi_matches(path, supported_abi, exact_abi=False):
    supported_abi = list_or_string(supported_abi)
    info = get_cacheable_apk_info(path)
    # If no native code present, suitable for all devices.
    if not info.native_code:
        return True

    if exact_abi:  # Only check primary
        return supported_abi[0] in info.native_code
    else:
        for abi in supported_abi:
            if abi in info.native_code:
                return True
    return False
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>,
limodou <limodou@gmail.com> and srackham <srackham@gmail.com>.
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

Interactive shell / script runner executed inside a web2py application
environment.
"""

# NOTE(review): this module is Python 2 only — it uses the `exec ... in`
# and `print >>` statement forms, `except E, e` syntax, `raw_input` and
# `dict.iteritems`. Do not run under Python 3 without porting.

import os
import sys
import code
import logging
import types
import re
import optparse
import glob
import traceback
import gluon.fileutils as fileutils
from gluon.settings import global_settings
from gluon.utils import web2py_uuid
from gluon.compileapp import build_environment, read_pyc, run_models_in
from gluon.restricted import RestrictedError
from gluon.globals import Request, Response, Session
from gluon.storage import Storage, List
from gluon.admin import w2p_unpack
from gluon.dal import BaseAdapter

logger = logging.getLogger("web2py")


def enable_autocomplete_and_history(adir,env):
    # Wire up readline tab-completion (against `env`) and a per-application
    # persistent history file; silently a no-op when readline is missing
    # (e.g. on Windows without pyreadline).
    try:
        import rlcompleter
        import atexit
        import readline
    except ImportError:
        pass
    else:
        # macOS ships libedit, which uses a different binding syntax.
        readline.parse_and_bind("bind ^I rl_complete"
                                if sys.platform == 'darwin'
                                else "tab: complete")
        history_file = os.path.join(adir,'.pythonhistory')
        try:
            readline.read_history_file(history_file)
        except IOError:
            # History file does not exist yet — create it empty.
            open(history_file, 'a').close()
        atexit.register(readline.write_history_file, history_file)
        readline.set_completer(rlcompleter.Completer(env).complete)


def exec_environment(
    pyfile='',
    request=None,
    response=None,
    session=None,
):
    """
    Build a web2py environment and optionally execute a Python file
    into it.

    :param pyfile: path of a .py file to execute in the environment;
        if a matching .pyc exists it is preferred.
    :param request: Request object, or None for a blank one.
    :param response: Response object, or None for a blank one.
    :param session: Session object, or None for a blank one.
    :returns: Storage dictionary containing the resulting environment.

    The working directory must be web2py root -- this is the web2py
    default.
    """

    if request is None:
        request = Request({})
    if response is None:
        response = Response()
    if session is None:
        session = Session()

    if request.folder is None:
        # Derive the application folder from the pyfile path when possible.
        mo = re.match(r'(|.*/)applications/(?P<appname>[^/]+)', pyfile)
        if mo:
            appname = mo.group('appname')
            request.folder = os.path.join('applications', appname)
        else:
            request.folder = ''
    env = build_environment(request, response, session, store_current=False)
    if pyfile:
        # Prefer the compiled bytecode when present.
        pycfile = pyfile + 'c'
        if os.path.isfile(pycfile):
            exec read_pyc(pycfile) in env
        else:
            execfile(pyfile, env)
    return Storage(env)


def env(
    a,
    import_models=False,
    c=None,
    f=None,
    dir='',
    extra_request={},
):
    """
    Return web2py execution environment for application (a), controller (c),
    function (f).
    If import_models is True the exec all application models into the
    environment.

    extra_request allows you to pass along any extra variables to the
    request object before your models get executed. This was mainly done
    to support web2py_utils.test_runner, however you can use it with any
    wrapper scripts that need access to the web2py environment.
    """
    # NOTE(review): `extra_request={}` is a mutable default argument; it is
    # only read here, never mutated, so this is safe in practice.

    request = Request({})
    response = Response()
    session = Session()
    request.application = a

    # Populate the dummy environment with sensible defaults.
    if not dir:
        request.folder = os.path.join('applications', a)
    else:
        request.folder = dir
    request.controller = c or 'default'
    request.function = f or 'index'
    response.view = '%s/%s.html' % (request.controller, request.function)
    if global_settings.cmd_options:
        ip = global_settings.cmd_options.ip
        port = global_settings.cmd_options.port
    else:
        ip, port = '127.0.0.1', '8000'
    request.env.http_host = '%s:%s' % (ip,port)
    request.env.remote_addr = '127.0.0.1'
    request.env.web2py_runtime_gae = global_settings.web2py_runtime_gae

    for k, v in extra_request.items():
        request[k] = v

    # Reconstruct a plausible PATH_INFO (with args and query string) so
    # code that inspects it behaves as if serving a real request.
    path_info = '/%s/%s/%s' % (a, c, f)
    if request.args:
        path_info = '%s/%s' % (path_info,
                               '/'.join(request.args))
    if request.vars:
        vars = ['%s=%s' % (k,v) if v else '%s' % k
                for (k,v) in request.vars.iteritems()]
        path_info = '%s?%s' % (path_info,
                               '&'.join(vars))
    request.env.path_info = path_info

    # Monkey patch so credentials checks pass.
    def check_credentials(request, other_application='admin'):
        return True

    fileutils.check_credentials = check_credentials

    environment = build_environment(request, response, session)

    if import_models:
        try:
            run_models_in(environment)
        except RestrictedError, e:
            sys.stderr.write(e.traceback + '\n')
            sys.exit(1)

    environment['__name__'] = '__main__'
    return environment


def exec_pythonrc():
    # Execute the user's $PYTHONSTARTUP file (if any) and return the
    # names it defined, for merging into the shell environment.
    pythonrc = os.environ.get('PYTHONSTARTUP')
    if pythonrc and os.path.isfile(pythonrc):
        def execfile_getlocals(file):
            execfile(file)
            return locals()
        try:
            return execfile_getlocals(pythonrc)
        except NameError:
            pass
    return dict()


def run(
    appname,
    plain=False,
    import_models=False,
    startfile=None,
    bpython=False,
    python_code=False,
    cronjob=False):
    """
    Start interactive shell or run Python script (startfile) in web2py
    controller environment. appname is formatted like:

    a      web2py application name
    a/c    exec the controller c into the application environment
    """

    (a, c, f, args, vars) = parse_path_info(appname, av=True)
    errmsg = 'invalid application name: %s' % appname
    if not a:
        die(errmsg)
    adir = os.path.join('applications', a)

    if not os.path.exists(adir):
        # Offer to scaffold a new application from welcome.w2p when
        # running interactively.
        if sys.stdin and not sys.stdin.name == '/dev/null':
            confirm = raw_input(
                'application %s does not exist, create (y/n)?' % a)
        else:
            logging.warn('application does not exist and will not be created')
            return
        if confirm.lower() in ['y', 'yes']:
            os.mkdir(adir)
            w2p_unpack('welcome.w2p', adir)
            for subfolder in ['models', 'views', 'controllers', 'databases',
                              'modules', 'cron', 'errors', 'sessions',
                              'languages', 'static', 'private', 'uploads']:
                subpath = os.path.join(adir, subfolder)
                if not os.path.exists(subpath):
                    os.mkdir(subpath)
            db = os.path.join(adir, 'models/db.py')
            if os.path.exists(db):
                # Seed the new app's db.py with a fresh secret key.
                data = fileutils.read_file(db)
                data = data.replace(
                    '<your secret key>', 'sha512:' + web2py_uuid())
                fileutils.write_file(db, data)
        # NOTE(review): if the user answers 'n' execution continues with a
        # non-existent application directory — confirm this is intended.

    if c:
        import_models = True
    extra_request = {}
    if args:
        extra_request['args'] = args
    if vars:
        extra_request['vars'] = vars
    _env = env(a, c=c, f=f, import_models=import_models,
               extra_request=extra_request)
    if c:
        # Execute the requested controller (compiled bytecode preferred
        # for cron jobs or when only the .pyc exists).
        pyfile = os.path.join('applications', a, 'controllers', c + '.py')
        pycfile = os.path.join('applications', a, 'compiled',
                               "controllers_%s_%s.pyc" % (c, f))
        if ((cronjob and os.path.isfile(pycfile))
                or not os.path.isfile(pyfile)):
            exec read_pyc(pycfile) in _env
        elif os.path.isfile(pyfile):
            execfile(pyfile, _env)
        else:
            die(errmsg)

    if f:
        # Call the requested function and print its result, then stop.
        exec ('print %s()' % f, _env)
        return

    _env.update(exec_pythonrc())
    if startfile:
        try:
            ccode = None
            if startfile.endswith('.pyc'):
                ccode = read_pyc(startfile)
                exec ccode in _env
            else:
                execfile(startfile, _env)

            if import_models:
                BaseAdapter.close_all_instances('commit')
        except Exception, e:
            print traceback.format_exc()
            if import_models:
                BaseAdapter.close_all_instances('rollback')
    elif python_code:
        try:
            exec(python_code, _env)
            if import_models:
                BaseAdapter.close_all_instances('commit')
        except Exception, e:
            print traceback.format_exc()
            if import_models:
                BaseAdapter.close_all_instances('rollback')
    else:
        # Interactive shell: try bpython or IPython first, fall back to
        # the plain code.interact console.
        if not plain:
            if bpython:
                try:
                    import bpython
                    bpython.embed(locals_=_env)
                    return
                except:
                    logger.warning(
                        'import bpython error; trying ipython...')
            else:
                try:
                    import IPython
                    # Embedding API changed across IPython releases.
                    if IPython.__version__ > '1.0.0':
                        IPython.start_ipython(user_ns=_env)
                        return
                    elif IPython.__version__ == '1.0.0':
                        from IPython.terminal.embed import InteractiveShellEmbed
                        shell = InteractiveShellEmbed(user_ns=_env)
                        shell()
                        return
                    elif IPython.__version__ >= '0.11':
                        from IPython.frontend.terminal.embed import InteractiveShellEmbed
                        shell = InteractiveShellEmbed(user_ns=_env)
                        shell()
                        return
                    else:
                        # following 2 lines fix a problem with
                        # IPython; thanks Michael Toomim
                        if '__builtins__' in _env:
                            del _env['__builtins__']
                        shell = IPython.Shell.IPShell(argv=[],
                                                      user_ns=_env)
                        shell.mainloop()
                        return
                except:
                    logger.warning(
                        'import IPython error; use default python shell')
        enable_autocomplete_and_history(adir,_env)
        code.interact(local=_env)


def parse_path_info(path_info, av=False):
    """
    Parse path info formatted like a/c/f where c and f are optional
    and a leading / accepted.
    Return tuple (a, c, f). If invalid path_info a is set to None.
    If c or f are omitted they are set to None.
    If av=True, parse args and vars
    """
    if av:
        vars = None
        if '?' in path_info:
            path_info, query = path_info.split('?', 2)
            vars = Storage()
            for var in query.split('&'):
                (var, val) = var.split('=', 2) if '=' in var \
                    else (var, None)
                vars[var] = val
        items = List(path_info.split('/'))
        args = List(items[3:]) if len(items) > 3 else None
        return (items(0), items(1), items(2), args, vars)

    mo = re.match(r'^/?(?P<a>\w+)(/(?P<c>\w+)(/(?P<f>\w+))?)?$',
                  path_info)
    if mo:
        return (mo.group('a'), mo.group('c'), mo.group('f'))
    else:
        return (None, None, None)


def die(msg):
    # Print an error to stderr and abort.
    print >> sys.stderr, msg
    sys.exit(1)


def test(testpath, import_models=True, verbose=False):
    """
    Run doctests in web2py environment. testpath is formatted like:

    a      tests all controllers in application a
    a/c    tests controller c in application a
    a/c/f  test function f in controller c, application a

    Where a, c and f are application, controller and function names
    respectively. If the testpath is a file name the file is tested.
    If a controller is specified models are executed by default.
    """

    import doctest
    if os.path.isfile(testpath):
        # A direct file path: infer the application from the path.
        mo = re.match(r'(|.*/)applications/(?P<a>[^/]+)', testpath)
        if not mo:
            die('test file is not in application directory: %s'
                % testpath)
        a = mo.group('a')
        c = f = None
        files = [testpath]
    else:
        (a, c, f) = parse_path_info(testpath)
        errmsg = 'invalid test path: %s' % testpath
        if not a:
            die(errmsg)
        cdir = os.path.join('applications', a, 'controllers')
        if not os.path.isdir(cdir):
            die(errmsg)
        if c:
            cfile = os.path.join(cdir, c + '.py')
            if not os.path.isfile(cfile):
                die(errmsg)
            files = [cfile]
        else:
            files = glob.glob(os.path.join(cdir, '*.py'))
    for testfile in files:
        globs = env(a, import_models)
        ignores = globs.keys()
        execfile(testfile, globs)

        def doctest_object(name, obj):
            """doctest obj and enclosed methods and classes."""
            if type(obj) in (types.FunctionType, types.TypeType,
                             types.ClassType, types.MethodType,
                             types.UnboundMethodType):

                # Reload environment before each test.
                globs = env(a, c=c, f=f, import_models=import_models)
                execfile(testfile, globs)
                doctest.run_docstring_examples(
                    obj, globs=globs,
                    name='%s: %s' % (os.path.basename(testfile),
                                     name), verbose=verbose)
                if type(obj) in (types.TypeType, types.ClassType):
                    for attr_name in dir(obj):
                        # Execute . operator so decorators are executed.
                        o = eval('%s.%s' % (name, attr_name), globs)
                        doctest_object(attr_name, o)

        for (name, obj) in globs.items():
            if name not in ignores and (f is None or f == name):
                doctest_object(name, obj)


def get_usage():
    usage = """
    %prog [options] pythonfile
    """
    return usage


def execute_from_command_line(argv=None):
    if argv is None:
        argv = sys.argv

    parser = optparse.OptionParser(usage=get_usage())

    parser.add_option('-S', '--shell', dest='shell', metavar='APPNAME',
                      help='run web2py in interactive shell ' +
                      'or IPython(if installed) with specified appname')
    msg = 'run web2py in interactive shell or bpython (if installed) with'
    msg += ' specified appname (if app does not exist it will be created).'
    msg += '\n Use combined with --shell'
    parser.add_option(
        '-B',
        '--bpython',
        action='store_true',
        default=False,
        dest='bpython',
        help=msg,
    )
    parser.add_option(
        '-P',
        '--plain',
        action='store_true',
        default=False,
        dest='plain',
        help='only use plain python shell, should be used with --shell option',
    )
    parser.add_option(
        '-M',
        '--import_models',
        action='store_true',
        default=False,
        dest='import_models',
        help='auto import model files, default is False, ' +
        ' should be used with --shell option',
    )
    parser.add_option(
        '-R',
        '--run',
        dest='run',
        metavar='PYTHON_FILE',
        default='',
        help='run PYTHON_FILE in web2py environment, ' +
        'should be used with --shell option',
    )

    (options, args) = parser.parse_args(argv[1:])

    # NOTE(review): this checks len(sys.argv) rather than len(argv) — if a
    # caller passes an explicit argv this may misbehave; confirm intent.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)

    if len(args) > 0:
        startfile = args[0]
    else:
        startfile = ''
    run(options.shell, options.plain, startfile=startfile,
        bpython=options.bpython)


if __name__ == '__main__':
    execute_from_command_line()
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.mgmt.core.exceptions import ARMErrorFormat from .. import models as _models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class ConnectionOperations(object): """ConnectionOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.automation.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def delete( self, resource_group_name, # type: str automation_account_name, # type: str connection_name, # type: str **kwargs # type: Any ): # type: (...) -> None """Delete the connection. :param resource_group_name: Name of an Azure Resource group. :type resource_group_name: str :param automation_account_name: The name of the automation account. :type automation_account_name: str :param connection_name: The name of connection. :type connection_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01" accept = "application/json" # Construct URL url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'), 'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'), 'connectionName': self._serialize.url("connection_name", connection_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 
'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/connections/{connectionName}'} # type: ignore def get( self, resource_group_name, # type: str automation_account_name, # type: str connection_name, # type: str **kwargs # type: Any ): # type: (...) -> "_models.Connection" """Retrieve the connection identified by connection name. :param resource_group_name: Name of an Azure Resource group. :type resource_group_name: str :param automation_account_name: The name of the automation account. :type automation_account_name: str :param connection_name: The name of connection. 
:type connection_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: Connection, or the result of cls(response) :rtype: ~azure.mgmt.automation.models.Connection :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.Connection"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'), 'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'), 'connectionName': self._serialize.url("connection_name", connection_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('Connection', pipeline_response) if cls: return cls(pipeline_response, 
deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/connections/{connectionName}'} # type: ignore def create_or_update( self, resource_group_name, # type: str automation_account_name, # type: str connection_name, # type: str parameters, # type: "_models.ConnectionCreateOrUpdateParameters" **kwargs # type: Any ): # type: (...) -> "_models.Connection" """Create or update a connection. :param resource_group_name: Name of an Azure Resource group. :type resource_group_name: str :param automation_account_name: The name of the automation account. :type automation_account_name: str :param connection_name: The parameters supplied to the create or update connection operation. :type connection_name: str :param parameters: The parameters supplied to the create or update connection operation. :type parameters: ~azure.mgmt.automation.models.ConnectionCreateOrUpdateParameters :keyword callable cls: A custom type or function that will be passed the direct response :return: Connection, or the result of cls(response) :rtype: ~azure.mgmt.automation.models.Connection :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.Connection"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'), 'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'), 'connectionName': 
self._serialize.url("connection_name", connection_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'ConnectionCreateOrUpdateParameters') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('Connection', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('Connection', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/connections/{connectionName}'} # type: ignore def update( self, resource_group_name, # type: str automation_account_name, # type: str connection_name, # type: str parameters, # type: "_models.ConnectionUpdateParameters" **kwargs # type: Any ): # 
type: (...) -> "_models.Connection" """Update a connection. :param resource_group_name: Name of an Azure Resource group. :type resource_group_name: str :param automation_account_name: The name of the automation account. :type automation_account_name: str :param connection_name: The parameters supplied to the update a connection operation. :type connection_name: str :param parameters: The parameters supplied to the update a connection operation. :type parameters: ~azure.mgmt.automation.models.ConnectionUpdateParameters :keyword callable cls: A custom type or function that will be passed the direct response :return: Connection, or the result of cls(response) :rtype: ~azure.mgmt.automation.models.Connection :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.Connection"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.update.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'), 'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'), 'connectionName': self._serialize.url("connection_name", connection_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", 
content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'ConnectionUpdateParameters') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('Connection', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/connections/{connectionName}'} # type: ignore def list_by_automation_account( self, resource_group_name, # type: str automation_account_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.ConnectionListResult"] """Retrieve a list of connections. :param resource_group_name: Name of an Azure Resource group. :type resource_group_name: str :param automation_account_name: The name of the automation account. 
:type automation_account_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ConnectionListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.automation.models.ConnectionListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-06-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_by_automation_account.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'), 'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('ConnectionListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) 
def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_by_automation_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/connections'} # type: ignore
import numpy as np

from ... import util
from ... import graph
from ... import grouping

from ..entities import Line, Arc

from collections import deque


def dict_to_path(as_dict):
    """
    Turn a pure dict into a dict containing entity objects that
    can be sent directly to a Path constructor.

    Parameters
    ------------
    as_dict : dict
      Has keys: 'vertices', 'entities'

    Returns
    ------------
    kwargs : dict
      Has keys: 'vertices', 'entities'
    """
    # start kwargs with initial value
    result = as_dict.copy()
    # map of constructors
    loaders = {'Arc': Arc, 'Line': Line}
    # pre-allocate entity array
    entities = [None] * len(as_dict['entities'])
    # run constructor for dict kwargs
    for entity_index, entity in enumerate(as_dict['entities']):
        entities[entity_index] = loaders[entity['type']](
            points=entity['points'],
            closed=entity['closed'])
    result['entities'] = entities

    return result


def lines_to_path(lines):
    """
    Turn line segments into a Path2D or Path3D object.

    Parameters
    ------------
    lines : (n, 2, dimension) or (n, dimension) float
      Line segments or connected polyline curve in 2D or 3D

    Returns
    -----------
    kwargs : dict
      kwargs for Path constructor

    Raises
    -----------
    ValueError
      If `lines` is neither (n, 2|3) nor (n, 2, 2|3) shaped.
    """
    lines = np.asanyarray(lines, dtype=np.float64)

    if util.is_shape(lines, (-1, (2, 3))):
        # the case where we have a list of points
        # we are going to assume they are connected
        return {'entities': np.array([Line(np.arange(len(lines)))]),
                'vertices': lines}
    elif util.is_shape(lines, (-1, 2, (2, 3))):
        # case where we have line segments in 2D or 3D
        dimension = lines.shape[-1]
        # convert lines to even number of (n, dimension) points
        lines = lines.reshape((-1, dimension))
        # merge duplicate vertices
        unique, inverse = grouping.unique_rows(lines)
        # use edges_to_path to skip creating
        # a bajillion individual line entities which
        # will be super slow vs. fewer polyline entities
        return edges_to_path(edges=inverse.reshape((-1, 2)),
                             vertices=lines[unique])

    # unrecognized shape: fail loudly
    # (a trailing `return result` used to follow this raise; it was
    #  unreachable dead code and has been removed)
    raise ValueError('Lines must be (n,(2|3)) or (n,2,(2|3))')


def polygon_to_path(polygon):
    """
    Load shapely Polygon objects into a trimesh.path.Path2D object

    Parameters
    -------------
    polygon : shapely.geometry.Polygon
      Input geometry

    Returns
    -----------
    kwargs : dict
      Keyword arguments for Path2D constructor
    """
    # start with a single polyline for the exterior
    entities = deque([Line(points=np.arange(
        len(polygon.exterior.coords)))])
    # start vertices
    vertices = np.array(polygon.exterior.coords).tolist()

    # append interiors as single Line objects
    for boundary in polygon.interiors:
        entities.append(Line(np.arange(len(boundary.coords)) +
                             len(vertices)))
        # append the new vertex array
        vertices.extend(boundary.coords)

    # make sure result arrays are numpy
    kwargs = {'entities': np.array(entities),
              'vertices': np.array(vertices)}

    return kwargs


def linestrings_to_path(multi):
    """
    Load shapely LineString objects into a trimesh.path.Path2D object

    Parameters
    -------------
    multi : shapely.geometry.LineString or MultiLineString
      Input 2D geometry

    Returns
    -------------
    kwargs : dict
      Keyword arguments for Path2D constructor
    """
    # append to result as we go
    entities = []
    vertices = []

    # allow a single LineString to be passed bare
    if not util.is_sequence(multi):
        multi = [multi]

    for line in multi:
        # only append geometry with points
        if hasattr(line, 'coords'):
            coords = np.array(line.coords)
            # a degenerate linestring has no segments to add
            if len(coords) < 2:
                continue
            entities.append(Line(np.arange(len(coords)) +
                                 len(vertices)))
            vertices.extend(coords)

    kwargs = {'entities': np.array(entities),
              'vertices': np.array(vertices)}
    return kwargs


def faces_to_path(mesh, face_ids=None, **kwargs):
    """
    Given a mesh and face indices find the outline edges and
    turn them into a Path3D.

    Parameters
    ------------
    mesh : trimesh.Trimesh
      Triangulated surface in 3D
    face_ids : (n,) int
      Indexes referencing mesh.faces

    Returns
    ---------
    kwargs : dict
      Kwargs for Path3D constructor
    """
    if face_ids is None:
        edges = mesh.edges_sorted
    else:
        # take advantage of edge ordering to index as single row
        edges = mesh.edges_sorted.reshape(
            (-1, 6))[face_ids].reshape((-1, 2))

    # an edge which occurs only once is on the boundary
    unique_edges = grouping.group_rows(
        edges, require_count=1)

    # add edges and vertices to kwargs
    kwargs.update(edges_to_path(edges=edges[unique_edges],
                                vertices=mesh.vertices))

    return kwargs


def edges_to_path(edges, vertices, **kwargs):
    """
    Given an edge list of indices and associated vertices
    representing lines, generate kwargs for a Path object.

    Parameters
    -----------
    edges : (n, 2) int
      Vertex indices of line segments
    vertices : (m, dimension) float
      Vertex positions where dimension is 2 or 3

    Returns
    ----------
    kwargs : dict
      Kwargs for Path constructor
    """
    # sequence of ordered traversals
    dfs = graph.traversals(edges, mode='dfs')
    # make sure every consecutive index in DFS
    # traversal is an edge in the source edge list
    dfs_connected = graph.fill_traversals(dfs, edges=edges)

    # turn traversals into Line objects
    lines = [Line(d) for d in dfs_connected]

    # kwargs for Path constructor
    kwargs.update({'entities': lines,
                   'vertices': vertices,
                   'process': False})
    return kwargs
# stdlib
import re
import time
import urllib

# 3p
import pymongo

# project
from checks import AgentCheck
from urlparse import urlsplit
from config import _is_affirmative
from distutils.version import LooseVersion  # pylint: disable=E0611,E0401

# Default connection/operation timeout in seconds (converted to ms in check()).
DEFAULT_TIMEOUT = 30
# Shorthands for the two submit methods used in the metric tables below.
GAUGE = AgentCheck.gauge
RATE = AgentCheck.rate


class MongoDb(AgentCheck):
    """
    MongoDB agent check.

    # Metrics
    Metric available for collection are listed by topic as `MongoDb` class variables.

    Various metric topics are collected by default. Others require the
    corresponding option enabled in the check configuration file.

    ## Format
    Metrics are listed with the following format:
        ```
        metric_name -> metric_type
        ```
        or
        ```
        metric_name -> (metric_type, alias)*
        ```
        * `alias` parameter is optional, if unspecified, MongoDB metrics are
          reported with their original metric names.

    # Service checks
    Available service checks:
    * `mongodb.can_connect`
      Connectivity health to the instance.
    * `mongodb.replica_set_member_state`
      Disposition of the member replica set state.
    """

    # Source
    SOURCE_TYPE_NAME = 'mongodb'

    # Service check
    SERVICE_CHECK_NAME = 'mongodb.can_connect'

    # Metrics
    """
    Core metrics collected by default.
    """
    BASE_METRICS = {
        "asserts.msg": RATE,
        "asserts.regular": RATE,
        "asserts.rollovers": RATE,
        "asserts.user": RATE,
        "asserts.warning": RATE,
        "backgroundFlushing.average_ms": GAUGE,
        "backgroundFlushing.flushes": RATE,
        "backgroundFlushing.last_ms": GAUGE,
        "backgroundFlushing.total_ms": GAUGE,
        "connections.available": GAUGE,
        "connections.current": GAUGE,
        "connections.totalCreated": GAUGE,
        "cursors.timedOut": GAUGE,
        "cursors.totalOpen": GAUGE,
        "extra_info.heap_usage_bytes": RATE,
        "extra_info.page_faults": RATE,
        "fsyncLocked": GAUGE,
        "globalLock.activeClients.readers": GAUGE,
        "globalLock.activeClients.total": GAUGE,
        "globalLock.activeClients.writers": GAUGE,
        "globalLock.currentQueue.readers": GAUGE,
        "globalLock.currentQueue.total": GAUGE,
        "globalLock.currentQueue.writers": GAUGE,
        "globalLock.lockTime": GAUGE,
        "globalLock.ratio": GAUGE,                  # < 2.2
        "globalLock.totalTime": GAUGE,
        "indexCounters.accesses": RATE,
        "indexCounters.btree.accesses": RATE,       # < 2.4
        "indexCounters.btree.hits": RATE,           # < 2.4
        "indexCounters.btree.misses": RATE,         # < 2.4
        "indexCounters.btree.missRatio": GAUGE,     # < 2.4
        "indexCounters.hits": RATE,
        "indexCounters.misses": RATE,
        "indexCounters.missRatio": GAUGE,
        "indexCounters.resets": RATE,
        "mem.bits": GAUGE,
        "mem.mapped": GAUGE,
        "mem.mappedWithJournal": GAUGE,
        "mem.resident": GAUGE,
        "mem.virtual": GAUGE,
        "metrics.cursor.open.noTimeout": GAUGE,
        "metrics.cursor.open.pinned": GAUGE,
        "metrics.cursor.open.total": GAUGE,
        "metrics.cursor.timedOut": RATE,
        "metrics.document.deleted": RATE,
        "metrics.document.inserted": RATE,
        "metrics.document.returned": RATE,
        "metrics.document.updated": RATE,
        "metrics.getLastError.wtime.num": RATE,
        "metrics.getLastError.wtime.totalMillis": RATE,
        "metrics.getLastError.wtimeouts": RATE,
        "metrics.operation.fastmod": RATE,
        "metrics.operation.idhack": RATE,
        "metrics.operation.scanAndOrder": RATE,
        "metrics.operation.writeConflicts": RATE,
        "metrics.queryExecutor.scanned": RATE,
        "metrics.record.moves": RATE,
        "metrics.repl.apply.batches.num": RATE,
        "metrics.repl.apply.batches.totalMillis": RATE,
        "metrics.repl.apply.ops": RATE,
        "metrics.repl.buffer.count": GAUGE,
        "metrics.repl.buffer.maxSizeBytes": GAUGE,
        "metrics.repl.buffer.sizeBytes": GAUGE,
        "metrics.repl.network.bytes": RATE,
        "metrics.repl.network.getmores.num": RATE,
        "metrics.repl.network.getmores.totalMillis": RATE,
        "metrics.repl.network.ops": RATE,
        "metrics.repl.network.readersCreated": RATE,
        "metrics.repl.oplog.insert.num": RATE,
        "metrics.repl.oplog.insert.totalMillis": RATE,
        "metrics.repl.oplog.insertBytes": RATE,
        "metrics.repl.preload.docs.num": RATE,
        "metrics.repl.preload.docs.totalMillis": RATE,
        "metrics.repl.preload.indexes.num": RATE,
        "metrics.repl.preload.indexes.totalMillis": RATE,
        "metrics.repl.storage.freelist.search.bucketExhausted": RATE,
        "metrics.repl.storage.freelist.search.requests": RATE,
        "metrics.repl.storage.freelist.search.scanned": RATE,
        "metrics.ttl.deletedDocuments": RATE,
        "metrics.ttl.passes": RATE,
        "network.bytesIn": RATE,
        "network.bytesOut": RATE,
        "network.numRequests": RATE,
        "opcounters.command": RATE,
        "opcounters.delete": RATE,
        "opcounters.getmore": RATE,
        "opcounters.insert": RATE,
        "opcounters.query": RATE,
        "opcounters.update": RATE,
        "opcountersRepl.command": RATE,
        "opcountersRepl.delete": RATE,
        "opcountersRepl.getmore": RATE,
        "opcountersRepl.insert": RATE,
        "opcountersRepl.query": RATE,
        "opcountersRepl.update": RATE,
        "oplog.logSizeMB": GAUGE,
        "oplog.usedSizeMB": GAUGE,
        "oplog.timeDiff": GAUGE,
        "replSet.health": GAUGE,
        "replSet.replicationLag": GAUGE,
        "replSet.state": GAUGE,
        "replSet.votes": GAUGE,
        "replSet.voteFraction": GAUGE,
        "stats.avgObjSize": GAUGE,
        "stats.collections": GAUGE,
        "stats.dataSize": GAUGE,
        "stats.fileSize": GAUGE,
        "stats.indexes": GAUGE,
        "stats.indexSize": GAUGE,
        "stats.nsSizeMB": GAUGE,
        "stats.numExtents": GAUGE,
        "stats.objects": GAUGE,
        "stats.storageSize": GAUGE,
        "uptime": GAUGE,
    }

    """
    Journaling-related operations and performance report.

    https://docs.mongodb.org/manual/reference/command/serverStatus/#serverStatus.dur
    """
    DURABILITY_METRICS = {
        "dur.commits": GAUGE,
        "dur.commitsInWriteLock": GAUGE,
        "dur.compression": GAUGE,
        "dur.earlyCommits": GAUGE,
        "dur.journaledMB": GAUGE,
        "dur.timeMs.dt": GAUGE,
        "dur.timeMs.prepLogBuffer": GAUGE,
        "dur.timeMs.remapPrivateView": GAUGE,
        "dur.timeMs.writeToDataFiles": GAUGE,
        "dur.timeMs.writeToJournal": GAUGE,
        "dur.writeToDataFilesMB": GAUGE,

        # Required version > 3.0.0
        "dur.timeMs.commits": GAUGE,
        "dur.timeMs.commitsInWriteLock": GAUGE,
    }

    """
    ServerStatus use of database commands report.
    Required version > 3.0.0.

    https://docs.mongodb.org/manual/reference/command/serverStatus/#serverStatus.metrics.commands
    """
    COMMANDS_METRICS = {
        # Required version >
        "metrics.commands.count.failed": RATE,
        "metrics.commands.count.total": GAUGE,
        "metrics.commands.createIndexes.failed": RATE,
        "metrics.commands.createIndexes.total": GAUGE,
        "metrics.commands.delete.failed": RATE,
        "metrics.commands.delete.total": GAUGE,
        "metrics.commands.eval.failed": RATE,
        "metrics.commands.eval.total": GAUGE,
        "metrics.commands.findAndModify.failed": RATE,
        "metrics.commands.findAndModify.total": GAUGE,
        "metrics.commands.insert.failed": RATE,
        "metrics.commands.insert.total": GAUGE,
        "metrics.commands.update.failed": RATE,
        "metrics.commands.update.total": GAUGE,
    }

    """
    ServerStatus locks report.
    Required version > 3.0.0.

    https://docs.mongodb.org/manual/reference/command/serverStatus/#server-status-locks
    """
    LOCKS_METRICS = {
        "locks.Collection.acquireCount.R": RATE,
        "locks.Collection.acquireCount.r": RATE,
        "locks.Collection.acquireCount.W": RATE,
        "locks.Collection.acquireCount.w": RATE,
        "locks.Collection.acquireWaitCount.R": RATE,
        "locks.Collection.acquireWaitCount.W": RATE,
        "locks.Collection.timeAcquiringMicros.R": RATE,
        "locks.Collection.timeAcquiringMicros.W": RATE,
        "locks.Database.acquireCount.r": RATE,
        "locks.Database.acquireCount.R": RATE,
        "locks.Database.acquireCount.w": RATE,
        "locks.Database.acquireCount.W": RATE,
        "locks.Database.acquireWaitCount.r": RATE,
        "locks.Database.acquireWaitCount.R": RATE,
        "locks.Database.acquireWaitCount.w": RATE,
        "locks.Database.acquireWaitCount.W": RATE,
        "locks.Database.timeAcquiringMicros.r": RATE,
        "locks.Database.timeAcquiringMicros.R": RATE,
        "locks.Database.timeAcquiringMicros.w": RATE,
        "locks.Database.timeAcquiringMicros.W": RATE,
        "locks.Global.acquireCount.r": RATE,
        "locks.Global.acquireCount.R": RATE,
        "locks.Global.acquireCount.w": RATE,
        "locks.Global.acquireCount.W": RATE,
        "locks.Global.acquireWaitCount.r": RATE,
        "locks.Global.acquireWaitCount.R": RATE,
        "locks.Global.acquireWaitCount.w": RATE,
        "locks.Global.acquireWaitCount.W": RATE,
        "locks.Global.timeAcquiringMicros.r": RATE,
        "locks.Global.timeAcquiringMicros.R": RATE,
        "locks.Global.timeAcquiringMicros.w": RATE,
        "locks.Global.timeAcquiringMicros.W": RATE,
        "locks.Metadata.acquireCount.R": RATE,
        "locks.Metadata.acquireCount.W": RATE,
        "locks.MMAPV1Journal.acquireCount.r": RATE,
        "locks.MMAPV1Journal.acquireCount.w": RATE,
        "locks.MMAPV1Journal.acquireWaitCount.r": RATE,
        "locks.MMAPV1Journal.acquireWaitCount.w": RATE,
        "locks.MMAPV1Journal.timeAcquiringMicros.r": RATE,
        "locks.MMAPV1Journal.timeAcquiringMicros.w": RATE,
        "locks.oplog.acquireCount.R": RATE,
        "locks.oplog.acquireCount.w": RATE,
        "locks.oplog.acquireWaitCount.R": RATE,
        "locks.oplog.acquireWaitCount.w": RATE,
        "locks.oplog.timeAcquiringMicros.R": RATE,
        "locks.oplog.timeAcquiringMicros.w": RATE,
    }

    """
    TCMalloc memory allocator report.
    """
    TCMALLOC_METRICS = {
        "tcmalloc.generic.current_allocated_bytes": GAUGE,
        "tcmalloc.generic.heap_size": GAUGE,
        "tcmalloc.tcmalloc.aggressive_memory_decommit": GAUGE,
        "tcmalloc.tcmalloc.central_cache_free_bytes": GAUGE,
        "tcmalloc.tcmalloc.current_total_thread_cache_bytes": GAUGE,
        "tcmalloc.tcmalloc.max_total_thread_cache_bytes": GAUGE,
        "tcmalloc.tcmalloc.pageheap_free_bytes": GAUGE,
        "tcmalloc.tcmalloc.pageheap_unmapped_bytes": GAUGE,
        "tcmalloc.tcmalloc.thread_cache_free_bytes": GAUGE,
        "tcmalloc.tcmalloc.transfer_cache_free_bytes": GAUGE,
    }

    """
    WiredTiger storage engine.
    """
    WIREDTIGER_METRICS = {
        "wiredTiger.cache.bytes currently in the cache": (GAUGE, "wiredTiger.cache.bytes_currently_in_cache"),  # noqa
        "wiredTiger.cache.failed eviction of pages that exceeded the in-memory maximum": (RATE, "wiredTiger.cache.failed_eviction_of_pages_exceeding_the_in-memory_maximum"),  # noqa
        "wiredTiger.cache.in-memory page splits": GAUGE,
        "wiredTiger.cache.maximum bytes configured": GAUGE,
        "wiredTiger.cache.maximum page size at eviction": GAUGE,
        "wiredTiger.cache.modified pages evicted": GAUGE,
        "wiredTiger.cache.pages read into cache": GAUGE,  # noqa
        "wiredTiger.cache.pages written from cache": GAUGE,  # noqa
        "wiredTiger.cache.pages currently held in the cache": (GAUGE, "wiredTiger.cache.pages_currently_held_in_cache"),  # noqa
        "wiredTiger.cache.pages evicted because they exceeded the in-memory maximum": (RATE, "wiredTiger.cache.pages_evicted_exceeding_the_in-memory_maximum"),  # noqa
        "wiredTiger.cache.pages evicted by application threads": RATE,
        "wiredTiger.cache.tracked dirty bytes in the cache": (GAUGE, "wiredTiger.cache.tracked_dirty_bytes_in_cache"),  # noqa
        "wiredTiger.cache.unmodified pages evicted": GAUGE,
        "wiredTiger.concurrentTransactions.read.available": GAUGE,
        "wiredTiger.concurrentTransactions.read.out": GAUGE,
        "wiredTiger.concurrentTransactions.read.totalTickets": GAUGE,
        "wiredTiger.concurrentTransactions.write.available": GAUGE,
        "wiredTiger.concurrentTransactions.write.out": GAUGE,
        "wiredTiger.concurrentTransactions.write.totalTickets": GAUGE,
    }

    """
    Usage statistics for each collection.

    https://docs.mongodb.org/v3.0/reference/command/top/
    """
    TOP_METRICS = {
        "commands.count": GAUGE,
        "commands.time": GAUGE,
        "getmore.count": GAUGE,
        "getmore.time": GAUGE,
        "insert.count": GAUGE,
        "insert.time": GAUGE,
        "queries.count": GAUGE,
        "queries.time": GAUGE,
        "readLock.count": GAUGE,
        "readLock.time": GAUGE,
        "remove.count": GAUGE,
        "remove.time": GAUGE,
        "total.count": GAUGE,
        "total.time": GAUGE,
        "update.count": GAUGE,
        "update.time": GAUGE,
        "writeLock.count": GAUGE,
        "writeLock.time": GAUGE,
    }

    COLLECTION_METRICS = {
        'collection.size': GAUGE,
        'collection.avgObjSize': GAUGE,
        'collection.count': GAUGE,
        'collection.capped': GAUGE,
        'collection.max': GAUGE,
        'collection.maxSize': GAUGE,
        'collection.storageSize': GAUGE,
        'collection.nindexes': GAUGE,
        'collection.indexSizes': GAUGE,
    }

    """
    Mapping for case-sensitive metric name suffixes.

    https://docs.mongodb.org/manual/reference/command/serverStatus/#server-status-locks
    """
    CASE_SENSITIVE_METRIC_NAME_SUFFIXES = {
        '\.R\\b': ".shared",
        '\.r\\b': ".intent_shared",
        '\.W\\b': ".exclusive",
        '\.w\\b': ".intent_exclusive",
    }

    """
    Metrics collected by default.
    """
    DEFAULT_METRICS = {
        'base': BASE_METRICS,
        'durability': DURABILITY_METRICS,
        'locks': LOCKS_METRICS,
        'wiredtiger': WIREDTIGER_METRICS,
    }

    """
    Additional metrics by category.
    """
    AVAILABLE_METRICS = {
        'metrics.commands': COMMANDS_METRICS,
        'tcmalloc': TCMALLOC_METRICS,
        'top': TOP_METRICS,
        'collection': COLLECTION_METRICS,
    }

    # Replication states
    """
    MongoDB replica set states, as documented at
    https://docs.mongodb.org/manual/reference/replica-states/
    """
    REPLSET_MEMBER_STATES = {
        0: ('STARTUP', 'Starting Up'),
        1: ('PRIMARY', 'Primary'),
        2: ('SECONDARY', 'Secondary'),
        3: ('RECOVERING', 'Recovering'),
        4: ('Fatal', 'Fatal'),   # MongoDB docs don't list this state
        5: ('STARTUP2', 'Starting up (forking threads)'),
        6: ('UNKNOWN', 'Unknown to this replset member'),
        7: ('ARBITER', 'Arbiter'),
        8: ('DOWN', 'Down'),
        9: ('ROLLBACK', 'Rollback'),
        10: ('REMOVED', 'Removed'),
    }

    def __init__(self, name, init_config, agentConfig, instances=None):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)

        # Members' last replica set states
        self._last_state_by_server = {}

        # List of metrics to collect per instance
        self.metrics_to_collect_by_instance = {}

        # Second path component of each COLLECTION_METRICS key
        # (e.g. 'size' from 'collection.size')
        self.collection_metrics_names = []
        for (key, value) in self.COLLECTION_METRICS.iteritems():
            self.collection_metrics_names.append(key.split('.')[1])

    def get_library_versions(self):
        # Report the pymongo client library version alongside check metadata.
        return {"pymongo": pymongo.version}

    def get_state_description(self, state):
        # Human-readable description for a replica set state code;
        # unknown codes get an explanatory fallback string.
        if state in self.REPLSET_MEMBER_STATES:
            return self.REPLSET_MEMBER_STATES[state][1]
        else:
            return 'Replset state %d is unknown to the Server Density agent' % state

    def get_state_name(self, state):
        # Short (upper-case) name for a replica set state code.
        if state in self.REPLSET_MEMBER_STATES:
            return self.REPLSET_MEMBER_STATES[state][0]
        else:
            return 'UNKNOWN'

    def _report_replica_set_state(self, state, clean_server_name, replset_name, agentConfig):
        """
        Report the member's replica set state
        * Submit a service check.
        * Create an event on state change.
        """
        # -1 marks "never seen before": no event on the first observation
        last_state = self._last_state_by_server.get(clean_server_name, -1)
        self._last_state_by_server[clean_server_name] = state
        if last_state != state and last_state != -1:
            return self.create_event(last_state, state, clean_server_name, replset_name, agentConfig)

    def hostname_for_event(self, clean_server_name, agentConfig):
        """Return a reasonable hostname for a replset membership event to mention."""
        uri = urlsplit(clean_server_name)
        # strip credentials (if any) and port from the netloc
        if '@' in uri.netloc:
            hostname = uri.netloc.split('@')[1].split(':')[0]
        else:
            hostname = uri.netloc.split(':')[0]
        if hostname == 'localhost':
            hostname = self.hostname
        return hostname

    def create_event(self, last_state, state, clean_server_name, replset_name, agentConfig):
        """Create an event with a message describing the replication
            state of a mongo node"""

        status = self.get_state_description(state)
        short_status = self.get_state_name(state)
        last_short_status = self.get_state_name(last_state)
        hostname = self.hostname_for_event(clean_server_name, agentConfig)
        msg_title = "%s is %s for %s" % (hostname, short_status, replset_name)
        msg = "MongoDB %s (%s) just reported as %s (%s) for %s; it was %s before." % (hostname, clean_server_name, status, short_status, replset_name, last_short_status)

        self.event({
            'timestamp': int(time.time()),
            'source_type_name': self.SOURCE_TYPE_NAME,
            'msg_title': msg_title,
            'msg_text': msg,
            'host': hostname,
            'tags': [
                'action:mongo_replset_member_status_change',
                'member_status:' + short_status,
                'previous_member_status:' + last_short_status,
                'replset:' + replset_name,
            ]
        })

    def _build_metric_list_to_collect(self, additional_metrics):
        """
        Build the metric list to collect based on the instance preferences.
        """
        metrics_to_collect = {}

        # Default metrics
        for default_metrics in self.DEFAULT_METRICS.itervalues():
            metrics_to_collect.update(default_metrics)

        # Additional metrics
        for option in additional_metrics:
            additional_metrics = self.AVAILABLE_METRICS.get(option)
            if not additional_metrics:
                if option in self.DEFAULT_METRICS:
                    self.log.warning(
                        u"`%s` option is deprecated."
                        u" The corresponding metrics are collected by default.", option
                    )
                else:
                    self.log.warning(
                        u"Failed to extend the list of metrics to collect:"
                        u" unrecognized `%s` option", option
                    )
                continue

            self.log.debug(
                u"Adding `%s` corresponding metrics to the list"
                u" of metrics to collect.", option
            )
            metrics_to_collect.update(additional_metrics)

        return metrics_to_collect

    def _get_metrics_to_collect(self, instance_key, additional_metrics):
        """
        Return and cache the list of metrics to collect.
        """
        if instance_key not in self.metrics_to_collect_by_instance:
            self.metrics_to_collect_by_instance[instance_key] = \
                self._build_metric_list_to_collect(additional_metrics)
        return self.metrics_to_collect_by_instance[instance_key]

    def _resolve_metric(self, original_metric_name, metrics_to_collect, prefix=""):
        """
        Return the submit method and the metric name to use.

        The metric name is defined as follow:
        * If available, the normalized metric name alias
        * (Or) the normalized original metric name
        """
        # table values are either a submit method, or a (method, alias) tuple
        submit_method = metrics_to_collect[original_metric_name][0] \
            if isinstance(metrics_to_collect[original_metric_name], tuple) \
            else metrics_to_collect[original_metric_name]

        metric_name = metrics_to_collect[original_metric_name][1] \
            if isinstance(metrics_to_collect[original_metric_name], tuple) \
            else original_metric_name

        return submit_method, self._normalize(metric_name, submit_method, prefix)

    def _normalize(self, metric_name, submit_method, prefix):
        """
        Replace case-sensitive metric name characters, normalize
        the metric name, prefix and suffix according to its type.
        """
        metric_prefix = "mongodb." if not prefix else "mongodb.{0}.".format(prefix)
        # rates get a "ps" (per second) suffix
        metric_suffix = "ps" if submit_method == RATE else ""

        # Replace case-sensitive metric name characters
        for pattern, repl in self.CASE_SENSITIVE_METRIC_NAME_SUFFIXES.iteritems():
            metric_name = re.compile(pattern).sub(repl, metric_name)

        # Normalize, and wrap
        return u"{metric_prefix}{normalized_metric_name}{metric_suffix}".format(
            normalized_metric_name=self.normalize(metric_name.lower()),
            metric_prefix=metric_prefix, metric_suffix=metric_suffix
        )

    def _authenticate(self, database, username, password, use_x509, server_name, service_check_tags):
        """
        Authenticate to the database.

        Available mechanisms:
        * Username & password
        * X.509

        More information:
        https://api.mongodb.com/python/current/examples/authentication.html
        """
        authenticated = False
        try:
            # X.509
            if use_x509:
                self.log.debug(
                    u"Authenticate `%s` to `%s` using `MONGODB-X509` mechanism",
                    username, database
                )
                authenticated = database.authenticate(username, mechanism='MONGODB-X509')

            # Username & password
            else:
                authenticated = database.authenticate(username, password)

        except pymongo.errors.PyMongoError as e:
            self.log.error(
                u"Authentication failed due to invalid credentials or configuration issues. %s", e
            )

        # failed authentication is fatal: report CRITICAL and abort the check
        if not authenticated:
            message = ("Mongo: cannot connect with config %s" % server_name)
            self.service_check(
                self.SERVICE_CHECK_NAME,
                AgentCheck.CRITICAL,
                tags=service_check_tags,
                message=message)
            raise Exception(message)

        return authenticated

    def _parse_uri(self, server, sanitize_username=False):
        """
        Parses a MongoDB-formatted URI (e.g. mongodb://user:pass@server/db)
        and returns parsed elements and a sanitized URI.
        """
        parsed = pymongo.uri_parser.parse_uri(server)

        username = parsed.get('username')
        password = parsed.get('password')
        db_name = parsed.get('database')
        nodelist = parsed.get('nodelist')
        auth_source = parsed.get('options', {}).get('authsource')

        # Remove password (and optionally username) from sanitized server URI.
        # To ensure that the `replace` works well, we first need to url-decode the raw server string
        # since the password parsed by pymongo is url-decoded
        decoded_server = urllib.unquote_plus(server)
        clean_server_name = decoded_server.replace(password, "*" * 5) if password else decoded_server

        if sanitize_username and username:
            username_pattern = u"{}[@:]".format(re.escape(username))
            clean_server_name = re.sub(username_pattern, "", clean_server_name)

        return username, password, db_name, nodelist, clean_server_name, auth_source

    def _collect_indexes_stats(self, instance, db, tags):
        """
        Collect indexes statistics for all collections in the configuration.
        This use the "$indexStats" command.
        """
        for coll_name in instance.get('collections', []):
            try:
                for stats in db[coll_name].aggregate([{"$indexStats": {}}], cursor={}):
                    idx_tags = tags + [
                        "name:{0}".format(stats.get('name', 'unknown')),
                        "collection:{0}".format(coll_name),
                    ]
                    self.gauge('mongodb.collection.indexes.accesses.ops', int(stats.get('accesses', {}).get('ops', 0)), idx_tags)
            except Exception as e:
                # best-effort: a missing/unreadable collection must not fail the check
                self.log.error("Could not fetch indexes stats for collection %s: %s", coll_name, e)

    def check(self, instance):
        """
        Returns a dictionary that looks a lot like what's sent back by db.serverStatus()
        """

        def total_seconds(td):
            """
            Returns total seconds of a timedelta in a way that's safe for Python < 2.7
            """
            # NOTE(review): the fallback path reads the closure variable `lag`
            # instead of the parameter `td`; it only works because the sole
            # pre-2.7 caller happens to pass `lag`. Should use `td` — confirm
            # and fix separately.
            if hasattr(td, 'total_seconds'):
                return td.total_seconds()
            else:
                return (
                    lag.microseconds +
                    (lag.seconds + lag.days * 24 * 3600) * 10**6
                ) / 10.0**6

        if 'server' not in instance:
            raise Exception("Missing 'server' in mongo config")

        # x.509 authentication
        ssl_params = {
            'ssl': instance.get('ssl', None),
            'ssl_keyfile': instance.get('ssl_keyfile', None),
            'ssl_certfile': instance.get('ssl_certfile', None),
            'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),
            'ssl_ca_certs': instance.get('ssl_ca_certs', None)
        }

        # drop unset options so pymongo only sees explicit SSL settings
        for key, param in ssl_params.items():
            if param is None:
                del ssl_params[key]

        server = instance['server']
        username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri(server, sanitize_username=bool(ssl_params))

        additional_metrics = instance.get('additional_metrics', [])

        # Get the list of metrics to collect
        collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics
        metrics_to_collect = self._get_metrics_to_collect(
            server,
            additional_metrics
        )

        # Tagging
        tags = instance.get('tags', [])
        # ...de-dupe tags to avoid a memory leak
        tags = list(set(tags))

        if not db_name:
            self.log.info('No MongoDB database found in URI. Defaulting to admin.')
            db_name = 'admin'

        service_check_tags = [
            "db:%s" % db_name
        ]
        service_check_tags.extend(tags)

        # ...add the `server` tag to the metrics' tags only
        # (it's added in the backend for service checks)
        tags.append('server:%s' % clean_server_name)

        if nodelist:
            host = nodelist[0][0]
            port = nodelist[0][1]
            service_check_tags = service_check_tags + [
                "host:%s" % host,
                "port:%s" % port
            ]

        # instance timeout is in seconds; pymongo expects milliseconds
        timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000
        try:
            cli = pymongo.mongo_client.MongoClient(
                server,
                socketTimeoutMS=timeout,
                connectTimeoutMS=timeout,
                serverSelectionTimeoutMS=timeout,
                read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,
                **ssl_params)
            # some commands can only go against the admin DB
            admindb = cli['admin']
            db = cli[db_name]
        except Exception:
            self.service_check(
                self.SERVICE_CHECK_NAME,
                AgentCheck.CRITICAL,
                tags=service_check_tags)
            raise

        # Authenticate
        do_auth = True
        # X.509 is assumed when SSL is configured but no password is given
        use_x509 = ssl_params and not password

        if not username:
            self.log.debug(
                u"A username is required to authenticate to `%s`", server
            )
            do_auth = False

        if do_auth:
            if auth_source:
                self.log.info("authSource was specified in the the server URL: using '%s' as the authentication database", auth_source)
                self._authenticate(cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags)
            else:
                self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags)

        try:
            status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)
        except Exception:
            self.service_check(
                self.SERVICE_CHECK_NAME,
                AgentCheck.CRITICAL,
                tags=service_check_tags)
            raise
        else:
            self.service_check(
                self.SERVICE_CHECK_NAME,
                AgentCheck.OK,
                tags=service_check_tags)

        if status['ok'] == 0:
            raise Exception(status['errmsg'].__str__())

        ops = db.current_op()
        status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0

        status['stats'] = db.command('dbstats')
        dbstats = {}
        dbstats[db_name] = {'stats': status['stats']}

        # Handle replica data, if any
        # See
        # http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus  # noqa
        try:
            data = {}
            dbnames = []

            replSet = admindb.command('replSetGetStatus')
            if replSet:
                primary = None
                current = None

                # need a new connection to deal with replica sets
                setname = replSet.get('set')
                cli_rs = pymongo.mongo_client.MongoClient(
                    server,
                    socketTimeoutMS=timeout,
                    connectTimeoutMS=timeout,
                    serverSelectionTimeoutMS=timeout,
                    replicaset=setname,
                    read_preference=pymongo.ReadPreference.NEAREST,
                    **ssl_params)

                if do_auth:
                    if auth_source:
                        self._authenticate(cli_rs[auth_source], username, password, use_x509, server, service_check_tags)
                    else:
                        self._authenticate(cli_rs[db_name], username, password, use_x509, server, service_check_tags)

                # Replication set information
                replset_name = replSet['set']
                replset_state = self.get_state_name(replSet['myState']).lower()

                tags.extend([
                    u"replset_name:{0}".format(replset_name),
                    u"replset_state:{0}".format(replset_state),
                ])

                # Find nodes: master and current node (ourself)
                for member in replSet.get('members'):
                    if member.get('self'):
                        current = member
                    if int(member.get('state')) == 1:
                        primary = member

                # Compute a lag time
                if current is not None and primary is not None:
                    if 'optimeDate' in primary and 'optimeDate' in current:
                        lag = primary['optimeDate'] - current['optimeDate']
                        data['replicationLag'] = total_seconds(lag)

                if current is not None:
                    data['health'] = current['health']

                data['state'] = replSet['myState']

                if current is not None:
                    # vote share of this member within the replica set config
                    total = 0.0
                    cfg = cli_rs['local']['system.replset'].find_one()
                    for member in cfg.get('members'):
                        total += member.get('votes', 1)
                        if member['_id'] == current['_id']:
                            data['votes'] = member.get('votes', 1)
                    data['voteFraction'] = data['votes'] / total

                status['replSet'] = data

                # Submit events
                self._report_replica_set_state(
                    data['state'],
                    clean_server_name,
                    replset_name,
                    self.agentConfig
                )

        except Exception as e:
            # standalone instances legitimately reject replSetGetStatus
            if "OperationFailure" in repr(e) and "not running with --replSet" in str(e):
                pass
            else:
                raise e

        # If these keys exist, remove them for now as they cannot be serialized
        try:
            status['backgroundFlushing'].pop('last_finished')
        except KeyError:
            pass
        try:
            status.pop('localTime')
        except KeyError:
            pass

        dbnames = cli.database_names()
        self.gauge('mongodb.dbs', len(dbnames), tags=tags)

        for db_n in dbnames:
            db_aux = cli[db_n]
            dbstats[db_n] = {'stats': db_aux.command('dbstats')}

        # Go through the metrics and save the values
        for metric_name in metrics_to_collect:
            # each metric is of the form: x.y.z with z optional
            # and can be found at status[x][y][z]
            value = status

            if metric_name.startswith('stats'):
                # per-database stats are handled in the dbstats loop below
                continue
            else:
                try:
                    for c in metric_name.split("."):
                        value = value[c]
                except KeyError:
                    continue

            # value is now status[x][y][z]
            if not isinstance(value, (int, long, float)):
                raise TypeError(
                    u"{0} value is a {1}, it should be an int, a float or a long instead."
                    .format(metric_name, type(value)))

            # Submit the metric
            submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)
            submit_method(self, metric_name_alias, value, tags=tags)

        for st, value in dbstats.iteritems():
            for metric_name in metrics_to_collect:
                if not metric_name.startswith('stats.'):
                    continue

                try:
                    val = value['stats'][metric_name.split('.')[1]]
                except KeyError:
                    continue

                # value is now status[x][y][z]
                if not isinstance(val, (int, long, float)):
                    raise TypeError(
                        u"{0} value is a {1}, it should be an int, a float or a long instead."
                        .format(metric_name, type(val))
                    )

                # Submit the metric
                metrics_tags = (
                    tags +
                    [
                        u"cluster:db:{0}".format(st),  # FIXME 6.0 - keep for backward compatibility
                        u"db:{0}".format(st),
                    ]
                )

                submit_method, metric_name_alias = \
                    self._resolve_metric(metric_name, metrics_to_collect)
                submit_method(self, metric_name_alias, val, tags=metrics_tags)

        if _is_affirmative(instance.get('collections_indexes_stats')):
            # $indexStats requires MongoDB >= 3.2
            mongo_version = cli.server_info().get('version', '0.0')
            if LooseVersion(mongo_version) >= LooseVersion("3.2"):
                self._collect_indexes_stats(instance, db, tags)
            else:
                self.log.error("'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s", mongo_version)

        # Report the usage metrics for dbs/collections
        if 'top' in additional_metrics:
            try:
                dbtop = db.command('top')
                for ns, ns_metrics in dbtop['totals'].iteritems():
                    if "." not in ns:
                        continue

                    # configure tags for db name and collection name
                    dbname, collname = ns.split(".", 1)
                    ns_tags = tags + ["db:%s" % dbname, "collection:%s" % collname]

                    # iterate over DBTOP metrics
                    for m in self.TOP_METRICS:
                        # each metric is of the form: x.y.z with z optional
                        # and can be found at ns_metrics[x][y][z]
                        value = ns_metrics
                        try:
                            for c in m.split("."):
                                value = value[c]
                        except Exception:
                            continue

                        # value is now status[x][y][z]
                        if not isinstance(value, (int, long, float)):
                            raise TypeError(
                                u"{0} value is a {1}, it should be an int, a float or a long instead."
                                .format(m, type(value))
                            )

                        # Submit the metric
                        submit_method, metric_name_alias = \
                            self._resolve_metric(m, metrics_to_collect, prefix="usage")
                        submit_method(self, metric_name_alias, value, tags=ns_tags)
            except Exception as e:
                self.log.warning('Failed to record `top` metrics %s' % str(e))

        if 'local' in dbnames:  # it might not be if we are connecting through mongos
            # Fetch information analogous to Mongo's db.getReplicationInfo()
            localdb = cli['local']

            oplog_data = {}

            # replica sets use oplog.rs; master/slave setups use oplog.$main
            for ol_collection_name in ("oplog.rs", "oplog.$main"):
                ol_options = localdb[ol_collection_name].options()
                if ol_options:
                    break

            if ol_options:
                try:
                    oplog_data['logSizeMB'] = round(
                        ol_options['size'] / 2.0 ** 20, 2
                    )

                    oplog = localdb[ol_collection_name]

                    oplog_data['usedSizeMB'] = round(
                        localdb.command("collstats", ol_collection_name)['size'] / 2.0 ** 20, 2
                    )

                    op_asc_cursor = oplog.find({"ts": {"$exists": 1}}).sort("$natural", pymongo.ASCENDING).limit(1)
                    op_dsc_cursor = oplog.find({"ts": {"$exists": 1}}).sort("$natural", pymongo.DESCENDING).limit(1)

                    try:
                        first_timestamp = op_asc_cursor[0]['ts'].as_datetime()
                        last_timestamp = op_dsc_cursor[0]['ts'].as_datetime()
                        oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp)
                    except (IndexError, KeyError):
                        # if the oplog collection doesn't have any entries
                        # if an object in the collection doesn't have a ts value, we ignore it
                        pass
                except KeyError:
                    # encountered an error trying to access options.size for the oplog collection
                    self.log.warning(u"Failed to record `ReplicationInfo` metrics.")

            for (m, value) in oplog_data.iteritems():
                submit_method, metric_name_alias = \
                    self._resolve_metric('oplog.%s' % m, metrics_to_collect)
                submit_method(self, metric_name_alias, value, tags=tags)

        else:
            self.log.debug('"local" database not in dbnames.
Not collecting ReplicationInfo metrics') # get collection level stats try: # Ensure that you're on the right db db = cli[db_name] # grab the collections from the configutation coll_names = instance.get('collections', []) # loop through the collections for coll_name in coll_names: # grab the stats from the collection stats = db.command("collstats", coll_name) # loop through the metrics for m in self.collection_metrics_names: coll_tags = tags + ["db:%s" % db_name, "collection:%s" % coll_name] value = stats.get(m, None) if not value: continue # if it's the index sizes, then it's a dict. if m == 'indexSizes': submit_method, metric_name_alias = \ self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS) # loop through the indexes for (idx, val) in value.iteritems(): # we tag the index idx_tags = coll_tags + ["index:%s" % idx] submit_method(self, metric_name_alias, val, tags=idx_tags) else: submit_method, metric_name_alias = \ self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS) submit_method(self, metric_name_alias, value, tags=coll_tags) except Exception as e: self.log.warning(u"Failed to record `collection` metrics.") self.log.exception(e)
""" homeassistant.components.light.limitlessled ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support for LimitlessLED bulbs. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/light.limitlessled/ """ import logging from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH, ATTR_RGB_COLOR, ATTR_TRANSITION, EFFECT_COLORLOOP, EFFECT_WHITE, FLASH_LONG, Light) _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ['limitlessled==1.0.0'] RGB_BOUNDARY = 40 DEFAULT_TRANSITION = 0 DEFAULT_PORT = 8899 DEFAULT_VERSION = 5 DEFAULT_LED_TYPE = 'rgbw' WHITE = [255, 255, 255] def rewrite_legacy(config): """ Rewrite legacy configuration to new format. """ bridges = config.get('bridges', [config]) new_bridges = [] for bridge_conf in bridges: groups = [] if 'groups' in bridge_conf: groups = bridge_conf['groups'] else: _LOGGER.warning("Legacy configuration format detected") for i in range(1, 5): name_key = 'group_%d_name' % i if name_key in bridge_conf: groups.append({ 'number': i, 'type': bridge_conf.get('group_%d_type' % i, DEFAULT_LED_TYPE), 'name': bridge_conf.get(name_key) }) new_bridges.append({ 'host': bridge_conf.get('host'), 'groups': groups }) return {'bridges': new_bridges} def setup_platform(hass, config, add_devices_callback, discovery_info=None): """ Gets the LimitlessLED lights. """ from limitlessled.bridge import Bridge # Two legacy configuration formats are supported to # maintain backwards compatibility. config = rewrite_legacy(config) # Use the expanded configuration format. 
lights = [] for bridge_conf in config.get('bridges'): bridge = Bridge(bridge_conf.get('host'), port=bridge_conf.get('port', DEFAULT_PORT), version=bridge_conf.get('version', DEFAULT_VERSION)) for group_conf in bridge_conf.get('groups'): group = bridge.add_group(group_conf.get('number'), group_conf.get('name'), group_conf.get('type', DEFAULT_LED_TYPE)) lights.append(LimitlessLEDGroup.factory(group)) add_devices_callback(lights) def state(new_state): """ State decorator. Specify True (turn on) or False (turn off). """ def decorator(function): """ Decorator function. """ # pylint: disable=no-member,protected-access def wrapper(self, **kwargs): """ Wrap a group state change. """ from limitlessled.pipeline import Pipeline pipeline = Pipeline() transition_time = DEFAULT_TRANSITION # Stop any repeating pipeline. if self.repeating: self.repeating = False self.group.stop() # Not on and should be? Turn on. if not self.is_on and new_state is True: pipeline.on() # Set transition time. if ATTR_TRANSITION in kwargs: transition_time = kwargs[ATTR_TRANSITION] # Do group type-specific work. function(self, transition_time, pipeline, **kwargs) # Update state. self._is_on = new_state self.group.enqueue(pipeline) self.update_ha_state() return wrapper return decorator class LimitlessLEDGroup(Light): """ LimitessLED group. """ def __init__(self, group): """ Initialize a group. """ self.group = group self.repeating = False self._is_on = False self._brightness = None @staticmethod def factory(group): """ Produce LimitlessLEDGroup objects. """ from limitlessled.group.rgbw import RgbwGroup from limitlessled.group.white import WhiteGroup if isinstance(group, WhiteGroup): return LimitlessLEDWhiteGroup(group) elif isinstance(group, RgbwGroup): return LimitlessLEDRGBWGroup(group) @property def should_poll(self): """ No polling needed. LimitlessLED state cannot be fetched. """ return False @property def name(self): """ Returns the name of the group. 
""" return self.group.name @property def is_on(self): """ True if device is on. """ return self._is_on @property def brightness(self): """ Brightness property. """ return self._brightness @state(False) def turn_off(self, transition_time, pipeline, **kwargs): """ Turn off a group. """ if self.is_on: pipeline.transition(transition_time, brightness=0.0).off() class LimitlessLEDWhiteGroup(LimitlessLEDGroup): """ LimitlessLED White group. """ def __init__(self, group): """ Initialize White group. """ super().__init__(group) # Initialize group with known values. self.group.on = True self.group.temperature = 1.0 self.group.brightness = 0.0 self._brightness = _to_hass_brightness(1.0) self._temperature = _to_hass_temperature(self.group.temperature) self.group.on = False @property def color_temp(self): """ Temperature property. """ return self._temperature @state(True) def turn_on(self, transition_time, pipeline, **kwargs): """ Turn on (or adjust property of) a group. """ # Check arguments. if ATTR_BRIGHTNESS in kwargs: self._brightness = kwargs[ATTR_BRIGHTNESS] if ATTR_COLOR_TEMP in kwargs: self._temperature = kwargs[ATTR_COLOR_TEMP] # Set up transition. pipeline.transition(transition_time, brightness=_from_hass_brightness( self._brightness), temperature=_from_hass_temperature( self._temperature)) class LimitlessLEDRGBWGroup(LimitlessLEDGroup): """ LimitlessLED RGBW group. """ def __init__(self, group): """ Initialize RGBW group. """ super().__init__(group) # Initialize group with known values. self.group.on = True self.group.white() self._color = WHITE self.group.brightness = 0.0 self._brightness = _to_hass_brightness(1.0) self.group.on = False @property def rgb_color(self): """ Color property. """ return self._color @state(True) def turn_on(self, transition_time, pipeline, **kwargs): """ Turn on (or adjust property of) a group. """ from limitlessled.presets import COLORLOOP # Check arguments. 
if ATTR_BRIGHTNESS in kwargs: self._brightness = kwargs[ATTR_BRIGHTNESS] if ATTR_RGB_COLOR in kwargs: self._color = kwargs[ATTR_RGB_COLOR] # White is a special case. if min(self._color) > 256 - RGB_BOUNDARY: pipeline.white() self._color = WHITE # Set up transition. pipeline.transition(transition_time, brightness=_from_hass_brightness( self._brightness), color=_from_hass_color(self._color)) # Flash. if ATTR_FLASH in kwargs: duration = 0 if kwargs[ATTR_FLASH] == FLASH_LONG: duration = 1 pipeline.flash(duration=duration) # Add effects. if ATTR_EFFECT in kwargs: if kwargs[ATTR_EFFECT] == EFFECT_COLORLOOP: self.repeating = True pipeline.append(COLORLOOP) if kwargs[ATTR_EFFECT] == EFFECT_WHITE: pipeline.white() self._color = WHITE def _from_hass_temperature(temperature): """ Convert Home Assistant color temperature units to percentage. """ return (temperature - 154) / 346 def _to_hass_temperature(temperature): """ Convert percentage to Home Assistant color temperature units. """ return int(temperature * 346) + 154 def _from_hass_brightness(brightness): """ Convert Home Assistant brightness units to percentage. """ return brightness / 255 def _to_hass_brightness(brightness): """ Convert percentage to Home Assistant brightness units. """ return int(brightness * 255) def _from_hass_color(color): """ Convert Home Assistant RGB list to Color tuple. """ from limitlessled import Color return Color(*tuple(color)) def _to_hass_color(color): """ Convert from Color tuple to Home Assistant RGB list. """ return list([int(c) for c in color])
# Copyright 2015 Cray # Copyright 2016 FUJITSU LIMITED # Copyright 2017 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import time import fixtures from oslo_config import cfg from oslo_config import fixture as fixture_config import testtools from sqlalchemy import delete, MetaData, insert, bindparam from monasca_api.common.repositories.sqla import models CONF = cfg.CONF class TestAlarmRepoDB(testtools.TestCase, fixtures.TestWithFixtures): @classmethod def setUpClass(cls): from sqlalchemy import engine_from_config engine = engine_from_config({'url': 'sqlite://'}, prefix='') qry = open('monasca_api/tests/sqlite_alarm.sql', 'r').read() sconn = engine.raw_connection() c = sconn.cursor() c.executescript(qry) sconn.commit() c.close() cls.engine = engine def _fake_engine_from_config(*args, **kw): return cls.engine cls.fixture = fixtures.MonkeyPatch( 'sqlalchemy.create_engine', _fake_engine_from_config) cls.fixture.setUp() metadata = MetaData() cls.aa = models.create_aa_model(metadata) cls._delete_aa_query = delete(cls.aa) cls._insert_aa_query = (insert(cls.aa) .values( alarm_definition_id=bindparam('alarm_definition_id'), alarm_state=bindparam('alarm_state'), action_id=bindparam('action_id'))) cls.ad = models.create_ad_model(metadata) cls._delete_ad_query = delete(cls.ad) cls._insert_ad_query = (insert(cls.ad) .values( id=bindparam('id'), tenant_id=bindparam('tenant_id'), name=bindparam('name'), severity=bindparam('severity'), 
expression=bindparam('expression'), match_by=bindparam('match_by'), actions_enabled=bindparam('actions_enabled'), created_at=bindparam('created_at'), updated_at=bindparam('updated_at'), deleted_at=bindparam('deleted_at'))) cls.sad = models.create_sad_model(metadata) cls._delete_sad_query = delete(cls.sad) cls._insert_sad_query = (insert(cls.sad) .values( id=bindparam('id'), alarm_definition_id=bindparam('alarm_definition_id'), function=bindparam('function'), metric_name=bindparam('metric_name'), operator=bindparam('operator'), threshold=bindparam('threshold'), period=bindparam('period'), periods=bindparam('periods'), created_at=bindparam('created_at'), updated_at=bindparam('updated_at'))) cls.sadd = models.create_sadd_model(metadata) cls._delete_sadd_query = delete(cls.sadd) cls._insert_sadd_query = (insert(cls.sadd) .values( sub_alarm_definition_id=bindparam('sub_alarm_definition_id'), dimension_name=bindparam('dimension_name'), value=bindparam('value'))) cls.nm = models.create_nm_model(metadata) cls._delete_nm_query = delete(cls.nm) cls._insert_nm_query = (insert(cls.nm) .values( id=bindparam('id'), tenant_id=bindparam('tenant_id'), name=bindparam('name'), type=bindparam('type'), address=bindparam('address'), created_at=bindparam('created_at'), updated_at=bindparam('updated_at'))) cls.a = models.create_a_model(metadata) cls._delete_a_query = delete(cls.a) cls._insert_a_query = (insert(cls.a) .values( id=bindparam('id'), alarm_definition_id=bindparam('alarm_definition_id'), state=bindparam('state'), lifecycle_state=bindparam('lifecycle_state'), link=bindparam('link'), created_at=bindparam('created_at'), updated_at=bindparam('updated_at'), state_updated_at=bindparam('state_updated_at'))) cls.sa = models.create_sa_model(metadata) cls._delete_sa_query = delete(cls.sa) cls._insert_sa_query = (insert(cls.sa) .values( id=bindparam('id'), sub_expression_id=bindparam('sub_expression_id'), alarm_id=bindparam('alarm_id'), expression=bindparam('expression'), 
created_at=bindparam('created_at'), updated_at=bindparam('updated_at'))) cls.am = models.create_am_model(metadata) cls._delete_am_query = delete(cls.am) cls._insert_am_query = (insert(cls.am) .values( alarm_id=bindparam('alarm_id'), metric_definition_dimensions_id=bindparam( 'metric_definition_dimensions_id'))) cls.md = models.create_md_model(metadata) cls._delete_md_query = delete(cls.md) cls._insert_md_query = (insert(cls.md) .values( dimension_set_id=bindparam('dimension_set_id'), name=bindparam('name'), value=bindparam('value'))) cls.mdd = models.create_mdd_model(metadata) cls._delete_mdd_query = delete(cls.mdd) cls._insert_mdd_query = (insert(cls.mdd) .values( id=bindparam('id'), metric_definition_id=bindparam('metric_definition_id'), metric_dimension_set_id=bindparam('metric_dimension_set_id'))) cls.mde = models.create_mde_model(metadata) cls._delete_mde_query = delete(cls.mde) cls._insert_mde_query = (insert(cls.mde) .values( id=bindparam('id'), name=bindparam('name'), tenant_id=bindparam('tenant_id'), region=bindparam('region'))) @classmethod def tearDownClass(cls): cls.fixture.cleanUp() def setUp(self): super(TestAlarmRepoDB, self).setUp() self._fixture_config = self.useFixture( fixture_config.Config(cfg.CONF)) self._fixture_config.config(url='sqlite://', group='database') from monasca_api.common.repositories.sqla import alarms_repository as ar self.repo = ar.AlarmsRepository() timestamp1 = datetime.datetime(2015, 3, 14, 9, 26, 53) timestamp2 = datetime.datetime(2015, 3, 14, 9, 26, 54) timestamp3 = datetime.datetime(2015, 3, 14, 9, 26, 55) timestamp4 = datetime.datetime(2015, 3, 15, 9, 26, 53) self.default_as = [{'id': '1', 'alarm_definition_id': '1', 'state': 'OK', 'lifecycle_state': 'OPEN', 'link': 'http://somesite.com/this-alarm-info', 'created_at': timestamp1, 'updated_at': timestamp1, 'state_updated_at': timestamp1}, {'id': '2', 'alarm_definition_id': '1', 'state': 'UNDETERMINED', 'lifecycle_state': 'OPEN', 'link': 
'http://somesite.com/this-alarm-info', 'created_at': timestamp2, 'updated_at': timestamp2, 'state_updated_at': timestamp2}, {'id': '3', 'alarm_definition_id': '1', 'state': 'ALARM', 'lifecycle_state': None, 'link': 'http://somesite.com/this-alarm-info', 'created_at': timestamp3, 'updated_at': timestamp3, 'state_updated_at': timestamp3}, {'id': '234111', 'alarm_definition_id': '234', 'state': 'UNDETERMINED', 'lifecycle_state': None, 'link': None, 'created_at': timestamp4, 'updated_at': timestamp4, 'state_updated_at': timestamp4}] self.default_ads = [{'id': '1', 'tenant_id': 'bob', 'name': '90% CPU', 'severity': 'LOW', 'expression': 'AVG(cpu.idle_perc{flavor_id=777,' ' image_id=888, device=1}) > 10', 'match_by': 'flavor_id,image_id', 'actions_enabled': False, 'created_at': datetime.datetime.now(), 'updated_at': datetime.datetime.now(), 'deleted_at': None}, {'id': '234', 'tenant_id': 'bob', 'name': '50% CPU', 'severity': 'LOW', 'expression': 'AVG(cpu.sys_mem' '{service=monitoring})' ' > 20 and AVG(cpu.idle_perc' '{service=monitoring}) < 10', 'match_by': 'hostname,region', 'actions_enabled': False, 'created_at': datetime.datetime.now(), 'updated_at': datetime.datetime.now(), 'deleted_at': None}] self.default_sadds = [{'sub_alarm_definition_id': '111', 'dimension_name': 'flavor_id', 'value': '777'}, {'sub_alarm_definition_id': '111', 'dimension_name': 'image_id', 'value': '888'}, {'sub_alarm_definition_id': '111', 'dimension_name': 'metric_name', 'value': 'cpu'}, {'sub_alarm_definition_id': '111', 'dimension_name': 'device', 'value': '1'}, {'sub_alarm_definition_id': '222', 'dimension_name': 'flavor_id', 'value': '777'}, {'sub_alarm_definition_id': '222', 'dimension_name': 'image_id', 'value': '888'}, {'sub_alarm_definition_id': '222', 'dimension_name': 'metric_name', 'value': 'mem'}] self.default_nms = [{'id': '29387234', 'tenant_id': 'alarm-test', 'name': 'MyEmail', 'type': 'EMAIL', 'address': 'a@b', 'created_at': datetime.datetime.now(), 'updated_at': 
datetime.datetime.now()}, {'id': '77778687', 'tenant_id': 'alarm-test', 'name': 'OtherEmail', 'type': 'EMAIL', 'address': 'a@b', 'created_at': datetime.datetime.now(), 'updated_at': datetime.datetime.now()}] self.default_aas = [{'alarm_definition_id': '123', 'alarm_state': 'ALARM', 'action_id': '29387234'}, {'alarm_definition_id': '123', 'alarm_state': 'ALARM', 'action_id': '77778687'}, {'alarm_definition_id': '234', 'alarm_state': 'ALARM', 'action_id': '29387234'}, {'alarm_definition_id': '234', 'alarm_state': 'ALARM', 'action_id': '77778687'}] self.default_sads = [{'id': '43', 'alarm_definition_id': '234', 'function': 'f_43', 'metric_name': 'm_43', 'operator': 'GT', 'threshold': 0, 'period': 1, 'periods': 2, 'created_at': datetime.datetime.now(), 'updated_at': datetime.datetime.now()}, {'id': '45', 'alarm_definition_id': '234', 'function': 'f_45', 'metric_name': 'm_45', 'operator': 'GT', 'threshold': 0, 'period': 1, 'periods': 2, 'created_at': datetime.datetime.now(), 'updated_at': datetime.datetime.now()}, {'id': '47', 'alarm_definition_id': '234', 'function': 'f_47', 'metric_name': 'm_47', 'operator': 'GT', 'threshold': 0, 'period': 1, 'periods': 2, 'created_at': datetime.datetime.now(), 'updated_at': datetime.datetime.now()}, {'id': '8484', 'alarm_definition_id': '234', 'function': 'f_49', 'metric_name': 'm_49', 'operator': 'GT', 'threshold': 0, 'period': 1, 'periods': 2, 'created_at': datetime.datetime.now(), 'updated_at': datetime.datetime.now()}, {'id': '8686', 'alarm_definition_id': '234', 'function': 'f_51', 'metric_name': 'm_51', 'operator': 'GT', 'threshold': 0, 'period': 1, 'periods': 2, 'created_at': datetime.datetime.now(), 'updated_at': datetime.datetime.now()}] self.default_sas = [{'sub_expression_id': '43', 'id': '42', 'alarm_id': '1', 'expression': 'avg(cpu.idle_perc{flavor_id=777,' ' image_id=888, device=1}) > 10', 'created_at': datetime.datetime.now(), 'updated_at': datetime.datetime.now()}, {'sub_expression_id': '45', 'id': '43', 'alarm_id': 
'2', 'expression': 'avg(cpu.idle_perc{flavor_id=777,' ' image_id=888, device=1}) > 10', 'created_at': datetime.datetime.now(), 'updated_at': datetime.datetime.now()}, {'sub_expression_id': '47', 'id': '44', 'alarm_id': '3', 'expression': 'avg(cpu.idle_perc{flavor_id=777,' ' image_id=888, device=1}) > 10', 'created_at': datetime.datetime.now(), 'updated_at': datetime.datetime.now()}] self.default_ams = [{'alarm_id': '1', 'metric_definition_dimensions_id': '11'}, {'alarm_id': '1', 'metric_definition_dimensions_id': '22'}, {'alarm_id': '2', 'metric_definition_dimensions_id': '11'}, {'alarm_id': '3', 'metric_definition_dimensions_id': '22'}, {'alarm_id': '234111', 'metric_definition_dimensions_id': '31'}, {'alarm_id': '234111', 'metric_definition_dimensions_id': '32'}] self.default_mdes = [{'id': '1', 'name': 'cpu.idle_perc', 'tenant_id': 'bob', 'region': 'west'}, {'id': '111', 'name': 'cpu.sys_mem', 'tenant_id': 'bob', 'region': 'west'}, {'id': '112', 'name': 'cpu.idle_perc', 'tenant_id': 'bob', 'region': 'west'}] self.default_mdds = [{'id': '11', 'metric_definition_id': '1', 'metric_dimension_set_id': '1'}, {'id': '22', 'metric_definition_id': '1', 'metric_dimension_set_id': '2'}, {'id': '31', 'metric_definition_id': '111', 'metric_dimension_set_id': '21'}, {'id': '32', 'metric_definition_id': '112', 'metric_dimension_set_id': '22'}] self.default_mds = [{'dimension_set_id': '1', 'name': 'instance_id', 'value': '123'}, {'dimension_set_id': '1', 'name': 'service', 'value': 'monitoring'}, {'dimension_set_id': '2', 'name': 'flavor_id', 'value': '222'}, {'dimension_set_id': '21', 'name': 'service', 'value': 'monitoring'}, {'dimension_set_id': '22', 'name': 'service', 'value': 'monitoring'}, {'dimension_set_id': '21', 'name': 'hostname', 'value': 'roland'}, {'dimension_set_id': '22', 'name': 'hostname', 'value': 'roland'}, {'dimension_set_id': '21', 'name': 'region', 'value': 'colorado'}, {'dimension_set_id': '22', 'name': 'region', 'value': 'colorado'}, 
{'dimension_set_id': '22', 'name': 'extra', 'value': 'vivi'}] self.alarm1 = {'alarm_definition': {'id': '1', 'name': '90% CPU', 'severity': 'LOW'}, 'created_timestamp': '2015-03-14T09:26:53Z', 'id': '1', 'lifecycle_state': 'OPEN', 'link': 'http://somesite.com/this-alarm-info', 'metrics': [{'dimensions': {'instance_id': '123', 'service': 'monitoring'}, 'name': 'cpu.idle_perc'}, {'dimensions': {'flavor_id': '222'}, 'name': 'cpu.idle_perc'}], 'state': 'OK', 'state_updated_timestamp': '2015-03-14T09:26:53Z', 'updated_timestamp': '2015-03-14T09:26:53Z'} self.alarm2 = {'alarm_definition': {'id': '1', 'name': '90% CPU', 'severity': 'LOW'}, 'created_timestamp': '2015-03-14T09:26:54Z', 'id': '2', 'lifecycle_state': 'OPEN', 'link': 'http://somesite.com/this-alarm-info', 'metrics': [{'dimensions': {'instance_id': '123', 'service': 'monitoring'}, 'name': 'cpu.idle_perc'}], 'state': 'UNDETERMINED', 'state_updated_timestamp': '2015-03-14T09:26:54Z', 'updated_timestamp': '2015-03-14T09:26:54Z'} self.alarm_compound = {'alarm_definition': {'id': '234', 'name': '50% CPU', 'severity': 'LOW'}, 'created_timestamp': '2015-03-15T09:26:53Z', 'id': '234111', 'lifecycle_state': None, 'link': None, 'metrics': [ {'dimensions': {'hostname': 'roland', 'region': 'colorado', 'service': 'monitoring'}, 'name': 'cpu.sys_mem'}, {'dimensions': {'extra': 'vivi', 'hostname': 'roland', 'region': 'colorado', 'service': 'monitoring'}, 'name': 'cpu.idle_perc'}], 'state': 'UNDETERMINED', 'state_updated_timestamp': '2015-03-15T09:26:53Z', 'updated_timestamp': '2015-03-15T09:26:53Z'} self.alarm3 = {'alarm_definition': {'id': '1', 'name': '90% CPU', 'severity': 'LOW'}, 'created_timestamp': '2015-03-14T09:26:55Z', 'id': '3', 'lifecycle_state': None, 'link': 'http://somesite.com/this-alarm-info', 'metrics': [{'dimensions': {'flavor_id': '222'}, 'name': 'cpu.idle_perc'}], 'state': 'ALARM', 'state_updated_timestamp': '2015-03-14T09:26:55Z', 'updated_timestamp': '2015-03-14T09:26:55Z'} with self.engine.begin() as 
conn: conn.execute(self._delete_am_query) conn.execute(self._insert_am_query, self.default_ams) conn.execute(self._delete_md_query) conn.execute(self._insert_md_query, self.default_mds) conn.execute(self._delete_mdd_query) conn.execute(self._insert_mdd_query, self.default_mdds) conn.execute(self._delete_a_query) conn.execute(self._insert_a_query, self.default_as) conn.execute(self._delete_sa_query) conn.execute(self._insert_sa_query, self.default_sas) conn.execute(self._delete_mde_query) conn.execute(self._insert_mde_query, self.default_mdes) conn.execute(self._delete_ad_query) conn.execute(self._insert_ad_query, self.default_ads) conn.execute(self._delete_sad_query) conn.execute(self._insert_sad_query, self.default_sads) conn.execute(self._delete_sadd_query) conn.execute(self._insert_sadd_query, self.default_sadds) conn.execute(self._delete_nm_query) conn.execute(self._insert_nm_query, self.default_nms) conn.execute(self._delete_aa_query) conn.execute(self._insert_aa_query, self.default_aas) def helper_builder_result(self, alarm_rows): result = [] if not alarm_rows: return result # Forward declaration alarm = {} prev_alarm_id = None for alarm_row in alarm_rows: if prev_alarm_id != alarm_row['alarm_id']: if prev_alarm_id is not None: result.append(alarm) ad = {u'id': alarm_row['alarm_definition_id'], u'name': alarm_row['alarm_definition_name'], u'severity': alarm_row['severity'], } metrics = [] alarm = {u'id': alarm_row['alarm_id'], u'metrics': metrics, u'state': alarm_row['state'], u'lifecycle_state': alarm_row['lifecycle_state'], u'link': alarm_row['link'], u'state_updated_timestamp': alarm_row['state_updated_timestamp'].isoformat() + 'Z', u'updated_timestamp': alarm_row['updated_timestamp'].isoformat() + 'Z', u'created_timestamp': alarm_row['created_timestamp'].isoformat() + 'Z', u'alarm_definition': ad} prev_alarm_id = alarm_row['alarm_id'] dimensions = {} metric = {u'name': alarm_row['metric_name'], u'dimensions': dimensions} if alarm_row['metric_dimensions']: 
for dimension in alarm_row['metric_dimensions'].split(','): parsed_dimension = dimension.split('=') dimensions[parsed_dimension[0]] = parsed_dimension[1] metrics.append(metric) result.append(alarm) return result def test_should_delete(self): tenant_id = 'bob' alarm_id = '1' alarm1 = self.repo.get_alarm(tenant_id, alarm_id) alarm1 = self.helper_builder_result(alarm1) self.assertEqual(alarm1[0], self.alarm1) self.repo.delete_alarm(tenant_id, alarm_id) from monasca_api.common.repositories import exceptions self.assertRaises(exceptions.DoesNotExistException, self.repo.get_alarm, tenant_id, alarm_id) def test_should_throw_exception_on_delete(self): tenant_id = 'bob' from monasca_api.common.repositories import exceptions self.assertRaises(exceptions.DoesNotExistException, self.repo.delete_alarm, tenant_id, 'Not an alarm ID') def test_should_find_alarm_def(self): tenant_id = 'bob' alarm_id = '1' expected = {'actions_enabled': False, 'deleted_at': None, 'description': None, 'expression': 'AVG(cpu.idle_perc{flavor_id=777,' ' image_id=888, device=1}) > 10', 'id': '1', 'match_by': 'flavor_id,image_id', 'name': '90% CPU', 'severity': 'LOW', 'tenant_id': 'bob'} alarm_def = self.repo.get_alarm_definition(tenant_id, alarm_id) expected['created_at'] = alarm_def['created_at'] expected['updated_at'] = alarm_def['updated_at'] self.assertEqual(alarm_def, expected) from monasca_api.common.repositories import exceptions self.assertRaises(exceptions.DoesNotExistException, self.repo.get_alarm_definition, tenant_id, 'Not an alarm ID') def test_should_find(self): res = self.repo.get_alarms(tenant_id='Not a tenant id', limit=1) self.assertEqual(res, []) tenant_id = 'bob' res = self.repo.get_alarms(tenant_id=tenant_id, limit=1000) res = self.helper_builder_result(res) expected = [self.alarm1, self.alarm2, self.alarm_compound, self.alarm3] self.assertEqual(res, expected) alarm_def_id = self.alarm_compound['alarm_definition']['id'] query_parms = {'alarm_definition_id': alarm_def_id} res = 
        # NOTE(review): this chunk begins mid-method -- the enclosing test's
        # `def` line (and the `res =` that the first call below completes)
        # sit above the visible source.  Each stanza queries get_alarms()
        # with one filter combination and checks the exact result set.
        self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms, limit=1000)
        res = self.helper_builder_result(res)
        expected = [self.alarm_compound]
        self.assertEqual(res, expected)

        # Filter by metric name only.
        query_parms = {'metric_name': 'cpu.sys_mem'}
        res = self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms, limit=1000)
        res = self.helper_builder_result(res)
        expected = [self.alarm_compound]
        self.assertEqual(res, expected)

        query_parms = {'metric_name': 'cpu.idle_perc'}
        res = self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms, limit=1000)
        res = self.helper_builder_result(res)
        expected = [self.alarm1, self.alarm2, self.alarm_compound, self.alarm3]
        self.assertEqual(res, expected)

        # Metric name plus a single dimension.
        query_parms = {'metric_name': 'cpu.idle_perc',
                       'metric_dimensions': {'flavor_id': '222'}}
        res = self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms, limit=1000)
        res = self.helper_builder_result(res)
        expected = [self.alarm1, self.alarm3]
        self.assertEqual(res, expected)

        # Metric name plus several dimensions (all must match).
        query_parms = {'metric_name': 'cpu.idle_perc',
                       'metric_dimensions': {'service': 'monitoring',
                                             'hostname': 'roland'}}
        res = self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms, limit=1000)
        res = self.helper_builder_result(res)
        expected = [self.alarm_compound]
        self.assertEqual(res, expected)

        # Filter by alarm state only.
        query_parms = {'state': 'UNDETERMINED'}
        res = self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms, limit=1000)
        res = self.helper_builder_result(res)
        expected = [self.alarm2, self.alarm_compound]
        self.assertEqual(res, expected)

        # Combine metric/dimension filters with an alarm-definition id.
        alarm_def_id = self.alarm1['alarm_definition']['id']
        query_parms = {'metric_name': 'cpu.idle_perc',
                       'metric_dimensions': {'service': 'monitoring'},
                       'alarm_definition_id': alarm_def_id}
        res = self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms, limit=1000)
        res = self.helper_builder_result(res)
        expected = [self.alarm1, self.alarm2]
        self.assertEqual(res, expected)

        alarm_def_id = self.alarm1['alarm_definition']['id']
        query_parms = {'metric_name': 'cpu.idle_perc',
                       'alarm_definition_id': alarm_def_id}
        res = self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms, limit=1000)
        res = self.helper_builder_result(res)
        expected = [self.alarm1, self.alarm2, self.alarm3]
        self.assertEqual(res, expected)

        alarm_def_id = self.alarm_compound['alarm_definition']['id']
        query_parms = {'alarm_definition_id': alarm_def_id,
                       'state': 'UNDETERMINED'}
        res = self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms, limit=1000)
        res = self.helper_builder_result(res)
        expected = [self.alarm_compound]
        self.assertEqual(res, expected)

        query_parms = {'metric_name': 'cpu.sys_mem',
                       'state': 'UNDETERMINED'}
        res = self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms, limit=1000)
        res = self.helper_builder_result(res)
        expected = [self.alarm_compound]
        self.assertEqual(res, expected)

        query_parms = {'metric_name': 'cpu.idle_perc',
                       'metric_dimensions': {'service': 'monitoring'},
                       'state': 'UNDETERMINED'}
        res = self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms, limit=1000)
        res = self.helper_builder_result(res)
        expected = [self.alarm2, self.alarm_compound]
        self.assertEqual(res, expected)

        # A state_updated_start_time in the future must exclude everything.
        time_now = datetime.datetime.now().isoformat() + 'Z'
        query_parms = {'metric_name': 'cpu.idle_perc',
                       'metric_dimensions': {'service': 'monitoring'},
                       'state': 'UNDETERMINED',
                       'state_updated_start_time': time_now}
        res = self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms, limit=1000)
        res = self.helper_builder_result(res)
        expected = []
        self.assertEqual(res, expected)

        # Fixed timestamps: only alarms whose state changed on/after the
        # given time are returned (fixture dates are presumably around
        # 2015-03-14/15 -- TODO confirm against the test fixtures).
        time_now = '2015-03-15T00:00:00.0Z'
        query_parms = {'state_updated_start_time': time_now}
        res = self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms, limit=1000)
        res = self.helper_builder_result(res)
        expected = [self.alarm_compound]
        self.assertEqual(res, expected)

        time_now = '2015-03-14T00:00:00.0Z'
        query_parms = {'state_updated_start_time': time_now}
        res = self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms, limit=1000)
        res = self.helper_builder_result(res)
        expected = [self.alarm1, self.alarm2, self.alarm_compound, self.alarm3]
        self.assertEqual(res, expected)

        # link/lifecycle_state filters plus an offset past the data -> empty.
        query_parms = {'state_updated_start_time': time_now,
                       'link': 'http://google.com',
                       'lifecycle_state': 'OPEN'}
        res = self.repo.get_alarms(tenant_id=tenant_id, query_parms=query_parms,
                                   limit=None, offset='10')
        res = self.helper_builder_result(res)
        expected = []
        self.assertEqual(res, expected)

    def test_should_update(self):
        """update_alarm() must bump both timestamps on a real state change,
        return the previous (state, link, lifecycle_state) triple, and leave
        state_updated_timestamp alone when the state does not change."""
        tenant_id = 'bob'
        alarm_id = '2'
        alarm = self.repo.get_alarm(tenant_id, alarm_id)
        alarm = self.helper_builder_result(alarm)[0]
        original_state_updated_date = alarm['state_updated_timestamp']
        original_updated_timestamp = alarm['updated_timestamp']
        self.assertEqual(alarm['state'], 'UNDETERMINED')

        # First update: UNDETERMINED -> OK, a genuine state change.
        prev_state, _ = self.repo.update_alarm(tenant_id, alarm_id, 'OK', None, None)
        alarm_new = self.repo.get_alarm(tenant_id, alarm_id)
        alarm_new = self.helper_builder_result(alarm_new)[0]
        new_state_updated_date = alarm_new['state_updated_timestamp']
        new_updated_timestamp = alarm_new['updated_timestamp']
        self.assertNotEqual(original_updated_timestamp, new_updated_timestamp,
                            'updated_at did not change')
        self.assertNotEqual(original_state_updated_date, new_state_updated_date,
                            'state_updated_at did not change')

        # update_alarm() returns the pre-update (state, link, lifecycle_state).
        alarm_tmp = tuple(alarm[k] for k in ('state', 'link', 'lifecycle_state'))
        self.assertEqual(alarm_tmp, prev_state)

        # Apart from the updated fields, the alarm must be unchanged.
        alarm['state_updated_timestamp'] = alarm_new['state_updated_timestamp']
        alarm['updated_timestamp'] = alarm_new['updated_timestamp']
        alarm['state'] = alarm_new['state']
        alarm['link'] = alarm_new['link']
        alarm['lifecycle_state'] = alarm_new['lifecycle_state']
        self.assertEqual(alarm, alarm_new)

        # Sleep so a second update lands in a different timestamp second.
        time.sleep(1)

        # Second update: OK -> OK, no state change.
        prev_state, _ = self.repo.update_alarm(tenant_id, alarm_id, 'OK', None, None)
        alarm_unchanged = self.repo.get_alarm(tenant_id, alarm_id)
        alarm_unchanged = self.helper_builder_result(alarm_unchanged)[0]
        unchanged_state_updated_date = alarm_unchanged['state_updated_timestamp']
        unchanged_updated_timestamp = alarm_unchanged['updated_timestamp']
        # updated_timestamp still advances ...
        self.assertNotEqual(unchanged_updated_timestamp, new_updated_timestamp,
                            'updated_at did not change')
        # ... but state_updated_timestamp must not.
        self.assertEqual(unchanged_state_updated_date, new_state_updated_date,
                         'state_updated_at did change')
        alarm_new_tmp = tuple(alarm_new[k] for k in ('state', 'link', 'lifecycle_state'))
        self.assertEqual(alarm_new_tmp, prev_state)

    def test_should_throw_exception_on_update(self):
        """Updating a nonexistent alarm raises DoesNotExistException."""
        tenant_id = 'bob'
        alarm_id = 'Not real alarm id'
        from monasca_api.common.repositories import exceptions
        self.assertRaises(exceptions.DoesNotExistException,
                          self.repo.update_alarm,
                          tenant_id, alarm_id, 'UNDETERMINED', None, None)

    def test_get_alarm_metrics(self):
        """get_alarm_metrics() returns the metrics attached to an alarm,
        with dimensions flattened into a single 'k=v,k=v' string."""
        alarm_id = '2'
        alarm_metrics = self.repo.get_alarm_metrics(alarm_id)
        expected = [{'alarm_id': '2',
                     'dimensions': 'instance_id=123,service=monitoring',
                     'name': 'cpu.idle_perc'}]
        self.assertEqual(alarm_metrics, expected)

    def test_get_subalarms(self):
        """get_sub_alarms() returns each sub-alarm with its definition id,
        parent alarm id and source expression."""
        tenant_id = 'bob'
        alarm_id = '2'
        sub_alarms = self.repo.get_sub_alarms(tenant_id, alarm_id)
        expected = [{'alarm_definition_id': '1',
                     'alarm_id': '2',
                     'expression':
                     'avg(cpu.idle_perc{flavor_id=777, image_id=888, device=1}) > 10',
                     'sub_alarm_id': '43'}]
        self.assertEqual(sub_alarms, expected)
import ctypes
import mmap
import platform
import sys

import windows
import windows.winproxy
from . import simple_x86 as x86
from . import simple_x64 as x64


class PyObj(ctypes.Structure):
    """Mirror of the head of every CPython object (PyObject)."""
    _fields_ = [("ob_refcnt", ctypes.c_size_t),
                ("ob_type", ctypes.c_void_p)]  # must be cast


class PyMmap(PyObj):
    """Mirror of the CPython mmap object layout, exposing the mapped
    base address and size."""
    _fields_ = [("ob_addr", ctypes.c_size_t),
                ("ob_size", ctypes.c_size_t)]


# Specific mmap class for code injection
class MyMap(mmap.mmap):
    """ A mmap that is never unmapped and that contains the page address """

    def __init__(self, *args, **kwarg):
        # Get the page address by 'introspection' of the C struct
        m = PyMmap.from_address(id(self))
        self.addr = m.ob_addr
        # Prevent garbage collection (so unmaping) of the page by
        # artificially bumping the C-level refcount.
        m.ob_refcnt += 1

    @classmethod
    def get_map(cls, size):
        """ Dispatch to the good mmap implem depending on the current system """
        systems = {'windows': Win32MyMap, 'linux': UnixMyMap}
        x = platform.system().lower()
        if x not in systems:
            raise ValueError("Unknow system {0}".format(x))
        return systems[x].get_map(size)


class Win32MyMap(MyMap):
    @classmethod
    def get_map(cls, size):
        """Allocate an RWX page with VirtualAlloc (0x1000 = MEM_COMMIT,
        0x40 = PAGE_EXECUTE_READWRITE -- TODO confirm against winproxy).

        NOTE(review): returns a ctypes char-array wrapping the allocation,
        not an actual Win32MyMap instance; also `from_address(addr)` is
        executed before the addr == 0 failure check.
        """
        addr = windows.winproxy.VirtualAlloc(0, size, 0x1000, 0x40)
        new_map = (ctypes.c_char * size).from_address(addr)
        new_map.addr = addr
        if new_map.addr == 0:
            raise ctypes.WinError()
        return new_map


class UnixMyMap(MyMap):
    @classmethod
    def get_map(cls, size):
        # Anonymous RWX mapping; MyMap.__init__ captures its address.
        prot = mmap.PROT_EXEC | mmap.PROT_WRITE | mmap.PROT_READ
        return cls(-1, size, prot=prot)


class CustomAllocator(object):
    """Bump allocator over never-unmapped executable pages; hands out raw
    addresses for data slots and writes generated machine code."""

    # Native int size per interpreter bitness.
    int_size = {'32bit': 4, '64bit': 8}

    def __init__(self):
        self.maps = []
        self.get_new_page(0x1000)
        self.names = []

    @classmethod
    def get_int_size(cls):
        """Return the native int size for the current interpreter."""
        bits = platform.architecture()[0]
        if bits not in cls.int_size:
            raise ValueError("Unknow platform bits <{0}>".format(bits))
        return cls.int_size[bits]

    def get_new_page(self, size):
        # Allocate a fresh page and reset the bump pointer.
        self.maps.append(MyMap.get_map(size))
        self.cur_offset = 0
        self.cur_page_size = size

    def reserve_size(self, size):
        """Reserve `size` bytes and return their absolute address."""
        if size + self.cur_offset > self.cur_page_size:
            # Round the new page size up to a 0x1000 multiple.
            self.get_new_page((size + 0x1000) & ~0xfff)
        addr = self.maps[-1].addr + self.cur_offset
        self.cur_offset += size
        return addr

    def reserve_int(self, nb_int=1):
        """Reserve room for `nb_int` native ints; return the base address."""
        int_size = self.get_int_size()
        return self.reserve_size(int_size * nb_int)

    def write_code(self, code):
        """Copy raw machine code into the current page and return the
        address it was written at."""
        size = len(code)
        if size + self.cur_offset > self.cur_page_size:
            self.get_new_page((size + 0x1000) & ~0xfff)
        self.maps[-1][self.cur_offset: self.cur_offset + size] = code
        addr = self.maps[-1].addr + self.cur_offset
        self.cur_offset += size
        return addr


# Module-wide allocator shared by all stub generation below.
allocator = CustomAllocator()


def get_functions():
    """Resolve [PyGILState_Ensure, PyObject_CallObject, PyGILState_Release]
    addresses from the running interpreter's pythonXY DLL."""
    version = sys.version_info
    python_dll = "python" + str(version.major) + str(version.minor)
    PyGILState_Ensure = windows.utils.get_func_addr(python_dll, 'PyGILState_Ensure'.encode())
    PyObject_CallObject = windows.utils.get_func_addr(python_dll, 'PyObject_CallObject'.encode())
    PyGILState_Release = windows.utils.get_func_addr(python_dll, 'PyGILState_Release'.encode())
    return [PyGILState_Ensure, PyObject_CallObject, PyGILState_Release]


def analyse_callback(callback):
    """Validate that `callback` is a ctypes function pointer; return id()."""
    if not callable(callback):
        raise ValueError("Need a callable object :)")
    obj_id = id(callback)
    # ctypes PyCFuncPtr instances carry an `_objects` attribute.
    if not hasattr(callback, '_objects'):
        raise ValueError("Need a ctypes PyCFuncPtr")
    return obj_id


# For windows 32 bits with stdcall
def generate_stub_32(callback):
    """Build x86 shellcode that wraps `callback` in a
    PyGILState_Ensure / PyGILState_Release pair, preserving the caller's
    registers and return address.  Returns the MultipleInstr object."""
    c_callback = get_callback_address_32(callback)
    # Data slots (absolute displacements) for saved state.
    gstate_save_addr = x86.create_displacement(disp=allocator.reserve_int())
    return_addr_save_addr = x86.create_displacement(disp=allocator.reserve_int())
    save_ebx = x86.create_displacement(disp=allocator.reserve_int())
    save_ecx = x86.create_displacement(disp=allocator.reserve_int())
    save_edx = x86.create_displacement(disp=allocator.reserve_int())
    save_esi = x86.create_displacement(disp=allocator.reserve_int())
    save_edi = x86.create_displacement(disp=allocator.reserve_int())
    ensure, objcall, release = get_functions()
    code = x86.MultipleInstr()
    # ## Shellcode ## #
    code += x86.Mov(save_ebx, 'EBX')
    code += x86.Mov(save_ecx, 'ECX')
    code += x86.Mov(save_edx, 'EDX')
    code += x86.Mov(save_esi, 'ESI')
    code += x86.Mov(save_edi, 'EDI')
    # gstate = PyGILState_Ensure()
    code += x86.Mov('EAX', ensure)
    code += x86.Call('EAX')
    code += x86.Mov(gstate_save_addr, 'EAX')
    # Save real return addr (for good argument parsing by the callback)
    code += x86.Pop('EAX')
    code += x86.Mov(return_addr_save_addr, 'EAX')
    code += x86.Mov('EAX', c_callback)
    code += x86.Call('EAX')
    # Restore real return value
    code += x86.Mov('EBX', return_addr_save_addr)
    code += x86.Push('EBX')
    # Save return value
    code += x86.Push('EAX')
    # PyGILState_Release(gstate)
    code += x86.Mov('EBX', gstate_save_addr)
    code += x86.Push('EBX')
    code += x86.Mov('EAX', release)
    code += x86.Call('EAX')
    # Discard `release` argument
    code += x86.Pop('EAX')
    # Restore return value
    code += x86.Pop('EAX')
    code += x86.Mov('EBX', save_ebx)
    code += x86.Mov('ECX', save_ecx)
    code += x86.Mov('EDX', save_edx)
    code += x86.Mov('ESI', save_esi)
    code += x86.Mov('EDI', save_edi)
    code += x86.Ret()
    return code


def generate_stub_64(callback):
    """x64 equivalent of generate_stub_32: wraps `callback` with GIL
    acquire/release, saving registers to an allocator-backed side `stack`
    instead of the real one."""
    c_callback = get_callback_address_64(callback)
    REG_LEN = ctypes.sizeof(ctypes.c_void_p)
    register_to_save = ("RBX", "RCX", "RDX", "RSI", "RDI", "R8", "R9",
                        "R10", "R11", "R12", "R13", "R14", "R15")
    push_all_save_register = x64.MultipleInstr([x64.Push(reg) for reg in register_to_save])
    pop_all_save_register = x64.MultipleInstr([x64.Pop(reg) for reg in reversed(register_to_save)])
    # Reserve parallel `stack`
    save_register_space = allocator.reserve_int(len(register_to_save))
    save_register_space_end = save_register_space + (ctypes.sizeof(ctypes.c_void_p) * (len(register_to_save)))
    # Per-register slot addresses within the parallel stack (pushes grow
    # downward from save_register_space_end, in register_to_save order).
    save_rbx = save_register_space_end - REG_LEN
    save_rbx  # Fuck the linter :D
    save_rcx = save_register_space_end - REG_LEN - REG_LEN
    save_rdx = save_register_space_end - REG_LEN - (REG_LEN * 2)
    save_rsi = save_register_space_end - REG_LEN - (REG_LEN * 3)
    save_rsi  # Fuck the linter :D
    save_rdi = save_register_space_end - REG_LEN - (REG_LEN * 4)
    save_rdi  # Fuck the linter :D
    save_r8 = save_register_space_end - REG_LEN - (REG_LEN * 5)
    save_r9 = save_register_space_end - REG_LEN - (REG_LEN * 6)
    gstate_save_addr = x64.create_displacement(disp=allocator.reserve_int())
    return_addr_save_addr = x64.create_displacement(disp=allocator.reserve_int())
    return_value_save_addr = x64.create_displacement(disp=allocator.reserve_int())
    # 4 pushes presumably model the Win64 32-byte shadow space, and the
    # single push re-aligns RSP to 16 bytes -- TODO confirm.
    Reserve_space_for_call = x64.MultipleInstr([x64.Push('RDI')] * 4)
    Clean_space_for_call = x64.MultipleInstr([x64.Pop('RDI')] * 4)
    Do_stack_alignement = x64.MultipleInstr([x64.Push('RDI')] * 1)
    Remove_stack_alignement = x64.MultipleInstr([x64.Pop('RDI')] * 1)
    ensure, objcall, release = get_functions()
    # ## Shellcode ## #
    code = x64.MultipleInstr()
    # Save all registers
    code += x64.Mov('RAX', save_register_space_end)
    code += x64.Xchg('RAX', 'RSP')
    code += push_all_save_register
    code += x64.Xchg('RAX', 'RSP')
    # GOOO
    code += x64.Mov('RAX', ensure)
    code += Reserve_space_for_call
    code += Do_stack_alignement
    code += x64.Call('RAX')
    code += Remove_stack_alignement
    code += Clean_space_for_call
    code += x64.Mov(gstate_save_addr, 'RAX')
    # Save real return addr (for good argument parsing by the callback)
    code += x64.Pop('RAX')
    code += x64.Mov(return_addr_save_addr, 'RAX')
    # Restore parameters for real function call
    code += x64.Mov('RAX', save_rcx)
    code += x64.Mov('RCX', x64.mem('[RAX]'))
    code += x64.Mov('RAX', save_rdx)
    code += x64.Mov('RDX', x64.mem('[RAX]'))
    code += x64.Mov('RAX', save_r9)
    code += x64.Mov('R9', x64.mem('[RAX]'))
    code += x64.Mov('RAX', save_r8)
    code += x64.Mov('R8', x64.mem('[RAX]'))
    # Call python code
    code += x64.Mov('RAX', c_callback)
    # no need for stack alignement here as we poped the return addr
    # no need for Reserve_space_for_call as we must use the previous one for correct argument parsing
    code += x64.Call('RAX')
    # Save return value
    code += x64.Mov(return_value_save_addr, 'RAX')
    # Repush real return value
    code += x64.Mov('RAX', return_addr_save_addr)
    code += x64.Push('RAX')
    # Call release(gstate_save)
    code += x64.Mov('RAX', gstate_save_addr)
    code += x64.Mov('RCX', 'RAX')
    code += x64.Mov('RAX', release)
    code += Reserve_space_for_call
    code += Do_stack_alignement
    code += x64.Call('RAX')
    code += Remove_stack_alignement
    code += Clean_space_for_call
    # Restore registers
    code += x64.Mov('RAX', save_register_space)
    code += x64.Xchg('RAX', 'RSP')
    code += pop_all_save_register
    code += x64.Xchg('RAX', 'RSP')
    # Restore return value
    code += x64.Mov('RAX', return_value_save_addr)
    code += x64.Ret()
    return code


def generate_callback_stub(callback, types):
    """Wrap the Python `callback` (WINFUNCTYPE signature `types`) in a
    GIL-safe native stub; return the stub's code address."""
    func_type = ctypes.WINFUNCTYPE(*types)
    c_callable = func_type(callback)
    if windows.current_process.bitness == 32:
        stub = generate_stub_32(c_callable)
    else:
        stub = generate_stub_64(c_callable)
    stub_addr = allocator.write_code(stub.get_code())
    # Hold references so the ctypes callable and stub are not collected
    # while native code may still jump to them.
    generate_callback_stub.l.append((stub, c_callable))
    return stub_addr

generate_callback_stub.l = []


def create_function(code, types):
    """Create a python function that call raw machine code

    :param str code: Raw machine code that will be called
    :param list types: Return type and parameters type (see :mod:`ctypes`)
    :return: the created function
    :rtype: function
    """
    func_type = ctypes.CFUNCTYPE(*types)
    addr = allocator.write_code(code)
    return func_type(addr)


# Return First argument for 32 bits code
raw_code = x86.MultipleInstr()
raw_code += x86.Mov('EAX', x86.mem('[ESP + 4]'))
raw_code += x86.Ret()
get_callback_address_32 = create_function(raw_code.get_code(), [ctypes.c_void_p])

# Return First argument for 64 bits code
raw_code = x64.MultipleInstr()
raw_code += x64.Mov('RAX', 'RCX')
raw_code += x64.Ret()
get_callback_address_64 = create_function(raw_code.get_code(), [ctypes.c_void_p])
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.error import HTTPError, URLError
from future.utils import python_2_unicode_compatible

import logging
import os
import re
import time
import warnings

import pkg_resources
from functools import total_ordering
from http.client import BadStatusLine

from path import Path
from requests import RequestException

from flexget import plugins as plugins_pkg
from flexget import config_schema
from flexget.event import add_event_handler as add_phase_handler
from flexget.event import fire_event, remove_event_handlers

log = logging.getLogger('plugin')


@python_2_unicode_compatible
class DependencyError(Exception):
    """Plugin depends on other plugin, but it cannot be loaded.

    Args:
        issued_by: name of the plugin trying to do the import
        missing: name of the plugin or library that is missing
        message: user readable error message

    All args are optional.
    """

    def __init__(self, issued_by=None, missing=None, message=None, silent=False):
        super(DependencyError, self).__init__()
        self.issued_by = issued_by
        self.missing = missing
        self._message = message
        self.silent = silent

    def _get_message(self):
        # Fall back to a generated message when none was supplied.
        if self._message:
            return self._message
        else:
            return 'Plugin `%s` requires dependency `%s`' % (self.issued_by, self.missing)

    def _set_message(self, message):
        self._message = message

    def has_message(self):
        """True when an explicit message was supplied (vs the generated one)."""
        return self._message is not None

    message = property(_get_message, _set_message)

    def __str__(self):
        return '<DependencyError(issued_by=%r,missing=%r,message=%r,silent=%r)>' % \
            (self.issued_by, self.missing, self.message, self.silent)


class RegisterException(Exception):
    """Raised on invalid plugin/phase registration."""

    def __init__(self, value):
        super(RegisterException, self).__init__()
        self.value = value

    def __str__(self):
        return repr(self.value)


@python_2_unicode_compatible
class PluginWarning(Warning):
    def __init__(self, value, logger=log, **kwargs):
        super(PluginWarning, self).__init__()
        self.value = value
        self.log = logger
        self.kwargs = kwargs

    def __str__(self):
        return self.value


@python_2_unicode_compatible
class PluginError(Exception):
    def __init__(self, value, logger=log, **kwargs):
        super(PluginError, self).__init__()
        # Value is expected to be a string
        if not isinstance(value, str):
            value = str(value)
        self.value = value
        self.log = logger
        self.kwargs = kwargs

    def __str__(self):
        return self.value


# TODO: move to utils or somewhere more appropriate
class internet(object):
    """@internet decorator for plugin phase methods.

    Catches all internet related exceptions and raises PluginError with relevant message.
    Task handles PluginErrors by aborting the task.
    """

    def __init__(self, logger=None):
        if logger:
            self.log = logger
        else:
            self.log = logging.getLogger('@internet')

    def __call__(self, func):
        def wrapped_func(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except RequestException as e:
                log.debug('decorator caught RequestException. handled traceback:', exc_info=True)
                raise PluginError('RequestException: %s' % e)
            except HTTPError as e:
                raise PluginError('HTTPError %s' % e.code, self.log)
            except URLError as e:
                log.debug('decorator caught urlerror. handled traceback:', exc_info=True)
                raise PluginError('URLError %s' % e.reason, self.log)
            except BadStatusLine:
                log.debug('decorator caught badstatusline. handled traceback:', exc_info=True)
                raise PluginError('Got BadStatusLine', self.log)
            except ValueError as e:
                log.debug('decorator caught ValueError. handled traceback:', exc_info=True)
                raise PluginError(e)
            except IOError as e:
                log.debug('decorator caught ioerror. handled traceback:', exc_info=True)
                # URLError-style IOErrors carry `reason`; HTTP-style carry `code`.
                if hasattr(e, 'reason'):
                    raise PluginError('Failed to reach server. Reason: %s' % e.reason, self.log)
                elif hasattr(e, 'code'):
                    raise PluginError('The server couldn\'t fulfill the request. Error code: %s' % e.code, self.log)
                raise PluginError('IOError when connecting to server: %s' % e, self.log)
        return wrapped_func


def priority(value):
    """Priority decorator for phase methods"""
    def decorator(target):
        target.priority = value
        return target
    return decorator


DEFAULT_PRIORITY = 128

plugin_contexts = ['task', 'root']

# task phases, in order of their execution; note that this can be extended by
# registering new phases at runtime
task_phases = ['start', 'input', 'metainfo', 'filter', 'download', 'modify', 'output', 'learn', 'exit']

# map phase names to method names
phase_methods = {
    # task
    'abort': 'on_task_abort'  # special; not a task phase that gets called normally
}
phase_methods.update((_phase, 'on_task_' + _phase) for _phase in task_phases)  # DRY

# Mapping of plugin name to PluginInfo instance (logical singletons)
plugins = {}

# Loading done?
plugins_loaded = False

_loaded_plugins = {}
_plugin_options = []
_new_phase_queue = {}


def register_task_phase(name, before=None, after=None):
    """Adds a new task phase to the available phases."""
    if before and after:
        raise RegisterException('You can only give either before or after for a phase.')
    if not before and not after:
        raise RegisterException('You must specify either a before or after phase.')
    if name in task_phases or name in _new_phase_queue:
        raise RegisterException('Phase %s already exists.' % name)

    def add_phase(phase_name, before, after):
        # Returns False when the anchor phase is not (yet) registered.
        if before is not None and before not in task_phases:
            return False
        if after is not None and after not in task_phases:
            return False
        # add method name to phase -> method lookup table
        phase_methods[phase_name] = 'on_task_' + phase_name
        # place phase in phase list
        if before is None:
            task_phases.insert(task_phases.index(after) + 1, phase_name)
        if after is None:
            task_phases.insert(task_phases.index(before), phase_name)
        return True

    # if can't add yet (dependencies) queue addition
    if not add_phase(name, before, after):
        _new_phase_queue[name] = [before, after]

    # Retry queued phases whose anchor may just have been registered.
    for phase_name, args in list(_new_phase_queue.items()):
        if add_phase(phase_name, *args):
            del _new_phase_queue[phase_name]


@total_ordering
class PluginInfo(dict):
    """
    Allows accessing key/value pairs of this dictionary subclass via
    attributes. Also instantiates a plugin and initializes properties.
    """
    # Counts duplicate registrations
    dupe_counter = 0

    def __init__(self, plugin_class, name=None, groups=None, builtin=False, debug=False,
                 api_ver=1, contexts=None, category=None):
        """
        Register a plugin.

        :param plugin_class: The plugin factory.
        :param string name: Name of the plugin (if not given, default to
            factory class name in underscore form).
        :param list groups: Groups this plugin belongs to.
        :param bool builtin: Auto-activated?
        :param bool debug: True if plugin is for debugging purposes.
        :param int api_ver: Signature of callback hooks (1=task; 2=task,config).
        :param list contexts: List of where this plugin is configurable.
            Can be 'task', 'root', or None
        :param string category: The type of plugin. Can be one of the task
            phases. Defaults to the package name containing the plugin.
        """
        dict.__init__(self)

        if groups is None:
            groups = []
        if name is None:
            # Convention is to take camel-case class name and rewrite it to an underscore form,
            # e.g. 'PluginName' to 'plugin_name'
            name = re.sub('[A-Z]+', lambda i: '_' + i.group(0).lower(), plugin_class.__name__).lstrip('_')
        if contexts is None:
            contexts = ['task']
        elif isinstance(contexts, str):
            contexts = [contexts]
        if category is None and plugin_class.__module__.startswith('flexget.plugins'):
            # By default look at the containing package of the plugin.
            category = plugin_class.__module__.split('.')[-2]

        # Check for unsupported api versions
        if api_ver < 2:
            warnings.warn('Api versions <2 are no longer supported. Plugin %s' % name,
                          DeprecationWarning, stacklevel=2)

        # Set basic info attributes
        self.api_ver = api_ver
        self.name = name
        self.groups = groups
        self.builtin = builtin
        self.debug = debug
        self.contexts = contexts
        self.category = category
        self.phase_handlers = {}

        self.plugin_class = plugin_class
        self.instance = None

        if self.name in plugins:
            PluginInfo.dupe_counter += 1
            raise Exception('already %s' % self.name)
            # NOTE(review): the log.critical below is unreachable -- the
            # raise above exits first.  Looks like the raise was added for
            # debugging; confirm which behavior (raise vs. log-and-skip)
            # is intended.
            log.critical('Error while registering plugin %s. '
                         'A plugin with the same name is already registered', self.name)
        else:
            plugins[self.name] = self

    def initialize(self):
        """Instantiate the plugin class and register its schema/handlers.
        Idempotent: a second call is a no-op."""
        if self.instance is not None:
            # We already initialized
            return
        # Create plugin instance
        self.instance = self.plugin_class()
        self.instance.plugin_info = self  # give plugin easy access to its own info
        self.instance.log = logging.getLogger(getattr(self.instance, "LOGGER_NAME", None) or self.name)
        if hasattr(self.instance, 'schema'):
            self.schema = self.instance.schema
        elif hasattr(self.instance, 'validator'):
            # old-style validator api
            self.schema = self.instance.validator().schema()
        else:
            # TODO: I think plugins without schemas should not be allowed in config, maybe rethink this
            self.schema = {}

        if self.schema is not None:
            location = '/schema/plugin/%s' % self.name
            self.schema['id'] = location
            config_schema.register_schema(location, self.schema)

        self.build_phase_handlers()

    def reset_phase_handlers(self):
        """Temporary utility method"""
        self.phase_handlers = {}
        self.build_phase_handlers()
        # TODO: should unregister events (from flexget.event)
        # this method is not used at the moment anywhere ...
        raise NotImplementedError

    def build_phase_handlers(self):
        """(Re)build phase_handlers in this plugin"""
        for phase, method_name in phase_methods.items():
            if phase in self.phase_handlers:
                continue
            if hasattr(self.instance, method_name):
                method = getattr(self.instance, method_name)
                if not callable(method):
                    continue
                # check for priority decorator
                if hasattr(method, 'priority'):
                    handler_prio = method.priority
                else:
                    handler_prio = DEFAULT_PRIORITY
                event = add_phase_handler('plugin.%s.%s' % (self.name, phase), method, handler_prio)
                # provides backwards compatibility
                event.plugin = self
                self.phase_handlers[phase] = event

    def __getattr__(self, attr):
        # Attribute access falls through to dict keys.
        if attr in self:
            return self[attr]
        return dict.__getattribute__(self, attr)

    def __setattr__(self, attr, value):
        self[attr] = value

    def __str__(self):
        return '<PluginInfo(name=%s)>' % self.name

    def _is_valid_operand(self, other):
        return hasattr(other, 'name')

    def __eq__(self, other):
        return self.name == other.name

    def __lt__(self, other):
        return self.name < other.name

    __repr__ = __str__


register = PluginInfo


def _strip_trailing_sep(path):
    # Drop trailing slashes/backslashes from a directory path.
    return path.rstrip("\\/")


def _get_standard_plugins_path():
    """
    :returns: List of directories where plugins should be tried to load from.
    """
    # Get basic path from environment
    paths = []
    env_path = os.environ.get('FLEXGET_PLUGIN_PATH')
    if env_path:
        paths = env_path.split(os.pathsep)
    # Add flexget.plugins directory (core plugins)
    paths.append(os.path.abspath(os.path.dirname(plugins_pkg.__file__)))
    return paths


def _check_phase_queue():
    """Log an error for any phase that stayed queued because its before/after
    anchor never got registered."""
    if _new_phase_queue:
        for phase, args in _new_phase_queue.items():
            # NOTE(review): args[0] is the `before` anchor, not a plugin
            # name -- the message's first %s likely gets the wrong value;
            # confirm intended arguments.
            log.error('Plugin %s requested new phase %s, but it could not be created at requested '
                      'point (before, after). Plugin is not working properly.', args[0], phase)


def _load_plugins_from_dirs(dirs):
    """
    :param list dirs: Directories from where plugins are loaded from
    """
    log.debug('Trying to load plugins from: %s' % dirs)
    dirs = [Path(d) for d in dirs if os.path.isdir(d)]
    # add all dirs to plugins_pkg load path so that imports work properly from any of the plugin dirs
    plugins_pkg.__path__ = list(map(_strip_trailing_sep, dirs))
    for plugins_dir in dirs:
        for plugin_path in plugins_dir.walkfiles('*.py'):
            if plugin_path.name == '__init__.py':
                continue
            # Split the relative path from the plugins dir to current file's parent dir to find subpackage names
            plugin_subpackages = [_f for _f in plugin_path.relpath(plugins_dir).parent.splitall() if _f]
            module_name = '.'.join([plugins_pkg.__name__] + plugin_subpackages + [plugin_path.namebase])
            try:
                __import__(module_name)
            except DependencyError as e:
                if e.has_message():
                    msg = e.message
                else:
                    # NOTE(review): this builds a tuple, not a formatted
                    # string -- the tuple is then passed to log.warning/
                    # log.debug as-is.  Probably meant %-formatting.
                    msg = 'Plugin `%s` requires `%s` to load.', e.issued_by or module_name, e.missing or 'N/A'
                if not e.silent:
                    log.warning(msg)
                else:
                    log.debug(msg)
            except ImportError as e:
                log.critical('Plugin `%s` failed to import dependencies', module_name, exc_info=True)
            except ValueError as e:
                # Debugging #2755
                log.error('ValueError attempting to import `%s` (from %s): %s', module_name, plugin_path, e)
            except Exception as e:
                log.critical('Exception while loading plugin %s', module_name, exc_info=True)
                raise
            else:
                log.trace('Loaded module %s from %s', module_name, plugin_path)
    _check_phase_queue()


def _load_plugins_from_packages():
    """Load plugins installed via PIP"""
    for entrypoint in pkg_resources.iter_entry_points('FlexGet.plugins'):
        try:
            plugin_module = entrypoint.load()
        except DependencyError as e:
            if e.has_message():
                msg = e.message
            else:
                # NOTE(review): tuple, not a formatted string (see
                # _load_plugins_from_dirs above).
                msg = 'Plugin `%s` requires `%s` to load.', e.issued_by or entrypoint.module_name, e.missing or 'N/A'
            if not e.silent:
                log.warning(msg)
            else:
                log.debug(msg)
        except ImportError as e:
            log.critical('Plugin `%s` failed to import dependencies', entrypoint.module_name, exc_info=True)
        except Exception as e:
            log.critical('Exception while loading plugin %s', entrypoint.module_name, exc_info=True)
            raise
        else:
            log.trace('Loaded packaged module %s from %s', entrypoint.module_name, plugin_module.__file__)
    _check_phase_queue()


def load_plugins(extra_dirs=None):
    """
    Load plugins from the standard plugin paths.

    :param list extra_dirs: Extra directories from where plugins are loaded.
    """
    global plugins_loaded

    if not extra_dirs:
        extra_dirs = []

    # Add flexget.plugins directory (core plugins)
    extra_dirs.extend(_get_standard_plugins_path())

    start_time = time.time()
    # Import all the plugins
    _load_plugins_from_dirs(extra_dirs)
    _load_plugins_from_packages()
    # Register them
    fire_event('plugin.register')
    # Plugins should only be registered once, remove their handlers after
    remove_event_handlers('plugin.register')
    # After they have all been registered, instantiate them
    for plugin in list(plugins.values()):
        plugin.initialize()
    took = time.time() - start_time
    plugins_loaded = True
    log.debug('Plugins took %.2f seconds to load. %s plugins in registry.', took, len(plugins.keys()))


def get_plugins(phase=None, group=None, context=None, category=None, name=None, min_api=None):
    """
    Query other plugins characteristics.

    :param string phase: Require phase
    :param string group: Plugin must belong to this group.
    :param string context: Where plugin is configured, eg. (root, task)
    :param string category: Type of plugin, phase names.
    :param string name: Name of the plugin.
    :param int min_api: Minimum api version.
    :return: List of PluginInfo instances.
    :rtype: list
    """
    def matches(plugin):
        if phase is not None and phase not in phase_methods:
            raise ValueError('Unknown phase %s' % phase)
        if phase and phase not in plugin.phase_handlers:
            return False
        if group and group not in plugin.groups:
            return False
        if context and context not in plugin.contexts:
            return False
        if category and not category == plugin.category:
            return False
        if name is not None and name != plugin.name:
            return False
        if min_api is not None and plugin.api_ver < min_api:
            return False
        return True

    # NOTE: returns a lazy filter object, not a list, despite the docstring.
    return filter(matches, iter(plugins.values()))


def plugin_schemas(**kwargs):
    """Create a dict schema that matches plugins specified by `kwargs`"""
    return {'type': 'object',
            'properties': dict((p.name, {'$ref': p.schema['id']}) for p in get_plugins(**kwargs)),
            'additionalProperties': False,
            'error_additionalProperties': '{{message}} Only known plugin names are valid keys.',
            'patternProperties': {'^_': {'title': 'Disabled Plugin'}}}


config_schema.register_schema('/schema/plugins', plugin_schemas)


def get_phases_by_plugin(name):
    """Return all phases plugin :name: hooks"""
    return list(get_plugin_by_name(name).phase_handlers)


def get_plugin_keywords():
    """Return iterator over all plugin keywords."""
    return iter(plugins.keys())


def get_plugin_by_name(name, issued_by='???'):
    """Get plugin by name, preferred way since this structure may be changed at some point."""
    if name not in plugins:
        raise DependencyError(issued_by=issued_by, missing=name, message='Unknown plugin %s' % name)
    return plugins[name]
""" tests.components.automation.test_state ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tests state automation. """ import unittest from datetime import timedelta from unittest.mock import patch import homeassistant.util.dt as dt_util import homeassistant.components.automation as automation import homeassistant.components.automation.state as state from tests.common import fire_time_changed, get_test_home_assistant class TestAutomationState(unittest.TestCase): """ Test the event automation. """ def setUp(self): # pylint: disable=invalid-name self.hass = get_test_home_assistant() self.hass.states.set('test.entity', 'hello') self.calls = [] def record_call(service): self.calls.append(service) self.hass.services.register('test', 'automation', record_call) def tearDown(self): # pylint: disable=invalid-name """ Stop down stuff we started. """ self.hass.stop() def test_old_config_if_fires_on_entity_change(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'platform': 'state', 'state_entity_id': 'test.entity', 'execute_service': 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_old_config_if_fires_on_entity_change_with_from_filter(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'platform': 'state', 'state_entity_id': 'test.entity', 'state_from': 'hello', 'execute_service': 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_old_config_if_fires_on_entity_change_with_to_filter(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'platform': 'state', 'state_entity_id': 'test.entity', 'state_to': 'world', 'execute_service': 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def 
test_old_config_if_fires_on_entity_change_with_both_filters(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'platform': 'state', 'state_entity_id': 'test.entity', 'state_from': 'hello', 'state_to': 'world', 'execute_service': 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_old_config_if_not_fires_if_to_filter_not_match(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'platform': 'state', 'state_entity_id': 'test.entity', 'state_from': 'hello', 'state_to': 'world', 'execute_service': 'test.automation' } })) self.hass.states.set('test.entity', 'moon') self.hass.pool.block_till_done() self.assertEqual(0, len(self.calls)) def test_old_config_if_not_fires_if_from_filter_not_match(self): self.hass.states.set('test.entity', 'bye') self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'platform': 'state', 'state_entity_id': 'test.entity', 'state_from': 'hello', 'state_to': 'world', 'execute_service': 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(0, len(self.calls)) def test_old_config_if_not_fires_if_entity_not_match(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'platform': 'state', 'state_entity_id': 'test.another_entity', 'execute_service': 'test.automation' } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(0, len(self.calls)) def test_old_config_if_action(self): entity_id = 'domain.test_entity' test_state = 'new_state' automation.setup(self.hass, { automation.DOMAIN: { 'platform': 'event', 'event_type': 'test_event', 'execute_service': 'test.automation', 'if': [{ 'platform': 'state', 'entity_id': entity_id, 'state': test_state, }] } }) self.hass.states.set(entity_id, test_state) self.hass.bus.fire('test_event') self.hass.pool.block_till_done() 
self.assertEqual(1, len(self.calls)) self.hass.states.set(entity_id, test_state + 'something') self.hass.bus.fire('test_event') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_fires_on_entity_change(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'trigger': { 'platform': 'state', 'entity_id': 'test.entity', }, 'action': { 'service': 'test.automation' } } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_fires_on_entity_change_with_from_filter(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'trigger': { 'platform': 'state', 'entity_id': 'test.entity', 'from': 'hello' }, 'action': { 'service': 'test.automation' } } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_fires_on_entity_change_with_to_filter(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'trigger': { 'platform': 'state', 'entity_id': 'test.entity', 'to': 'world' }, 'action': { 'service': 'test.automation' } } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_fires_on_entity_change_with_state_filter(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'trigger': { 'platform': 'state', 'entity_id': 'test.entity', 'state': 'world' }, 'action': { 'service': 'test.automation' } } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_fires_on_entity_change_with_both_filters(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'trigger': { 'platform': 'state', 'entity_id': 'test.entity', 'from': 'hello', 'to': 'world' }, 'action': { 'service': 'test.automation' } } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() 
self.assertEqual(1, len(self.calls)) def test_if_not_fires_if_to_filter_not_match(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'trigger': { 'platform': 'state', 'entity_id': 'test.entity', 'from': 'hello', 'to': 'world' }, 'action': { 'service': 'test.automation' } } })) self.hass.states.set('test.entity', 'moon') self.hass.pool.block_till_done() self.assertEqual(0, len(self.calls)) def test_if_not_fires_if_from_filter_not_match(self): self.hass.states.set('test.entity', 'bye') self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'trigger': { 'platform': 'state', 'entity_id': 'test.entity', 'from': 'hello', 'to': 'world' }, 'action': { 'service': 'test.automation' } } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(0, len(self.calls)) def test_if_not_fires_if_entity_not_match(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'trigger': { 'platform': 'state', 'entity_id': 'test.anoter_entity', }, 'action': { 'service': 'test.automation' } } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.assertEqual(0, len(self.calls)) def test_if_action(self): entity_id = 'domain.test_entity' test_state = 'new_state' automation.setup(self.hass, { automation.DOMAIN: { 'trigger': { 'platform': 'event', 'event_type': 'test_event', }, 'condition': [{ 'platform': 'state', 'entity_id': entity_id, 'state': test_state }], 'action': { 'service': 'test.automation' } } }) self.hass.states.set(entity_id, test_state) self.hass.bus.fire('test_event') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) self.hass.states.set(entity_id, test_state + 'something') self.hass.bus.fire('test_event') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_fails_setup_if_to_boolean_value(self): self.assertFalse(state.trigger( self.hass, { 'platform': 'state', 'entity_id': 'test.entity', 'to': True, }, lambda 
x: x)) def test_if_fails_setup_if_from_boolean_value(self): self.assertFalse(state.trigger( self.hass, { 'platform': 'state', 'entity_id': 'test.entity', 'from': True, }, lambda x: x)) def test_if_fails_setup_bad_for(self): self.assertFalse(state.trigger( self.hass, { 'platform': 'state', 'entity_id': 'test.entity', 'to': 'world', 'for': { 'invalid': 5 }, }, lambda x: x)) def test_if_fails_setup_for_without_to(self): self.assertFalse(state.trigger( self.hass, { 'platform': 'state', 'entity_id': 'test.entity', 'for': { 'seconds': 5 }, }, lambda x: x)) def test_if_not_fires_on_entity_change_with_for(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'trigger': { 'platform': 'state', 'entity_id': 'test.entity', 'to': 'world', 'for': { 'seconds': 5 }, }, 'action': { 'service': 'test.automation' } } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() self.hass.states.set('test.entity', 'not_world') self.hass.pool.block_till_done() fire_time_changed(self.hass, dt_util.utcnow() + timedelta(seconds=10)) self.hass.pool.block_till_done() self.assertEqual(0, len(self.calls)) def test_if_fires_on_entity_change_with_for(self): self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'trigger': { 'platform': 'state', 'entity_id': 'test.entity', 'to': 'world', 'for': { 'seconds': 5 }, }, 'action': { 'service': 'test.automation' } } })) self.hass.states.set('test.entity', 'world') self.hass.pool.block_till_done() fire_time_changed(self.hass, dt_util.utcnow() + timedelta(seconds=10)) self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_fires_on_for_condition(self): point1 = dt_util.utcnow() point2 = point1 + timedelta(seconds=10) with patch('homeassistant.core.dt_util.utcnow') as mock_utcnow: mock_utcnow.return_value = point1 self.hass.states.set('test.entity', 'on') self.assertTrue(automation.setup(self.hass, { automation.DOMAIN: { 'trigger': { 'platform': 'event', 'event_type': 
'test_event', }, 'condition': { 'platform': 'state', 'entity_id': 'test.entity', 'state': 'on', 'for': { 'seconds': 5 }, }, 'action': { 'service': 'test.automation' } } })) # not enough time has passed self.hass.bus.fire('test_event') self.hass.pool.block_till_done() self.assertEqual(0, len(self.calls)) # Time travel 10 secs into the future mock_utcnow.return_value = point2 self.hass.bus.fire('test_event') self.hass.pool.block_till_done() self.assertEqual(1, len(self.calls)) def test_if_fails_setup_for_without_time(self): self.assertIsNone(state.if_action( self.hass, { 'platform': 'state', 'entity_id': 'test.entity', 'state': 'on', 'for': {}, })) def test_if_fails_setup_for_without_entity(self): self.assertIsNone(state.if_action( self.hass, { 'platform': 'state', 'state': 'on', 'for': { 'seconds': 5 }, }))
import time
import traceback
from uuid import uuid4

import ircd
from ircd.constants import *


class Channel(object):
    """An IRC channel and its membership/metadata state.

    All live channels are registered in the class-level ``channels`` dict,
    keyed by channel name.
    """

    # name -> Channel registry of every channel currently in existence.
    channels = {}

    @classmethod
    def get(cls, name, user=None):
        # NOTE(review): the `user` parameter is accepted but never used.
        return cls.channels.get(name, None)

    def __init__(self, name, creator):
        # Channel metadata; `creator` is the User who first joined.
        self.name = name
        self._modes = ""
        self.topic = ""
        self.creator = creator.id
        self.created = time.time()
        self.topicset = 0           # timestamp the topic was set (0 = never)
        self.setby = creator.nickname
        self.key = ""
        self.banlist = []
        self.exemptlist = []
        self.invitelist = []

    def __contains__(self, item):
        # `user in channel` membership test, driven by the user's own
        # channel-record list.
        if isinstance(item, User):
            for record in item.chans:
                if record.channel.name == self.name:
                    return True
            return False
        return False

    @property
    def moderated(self):
        # +m: only voiced/op users may speak.
        return "m" in self._modes

    @property
    def modes(self):
        # TODO actually implement this
        return

    def mode(self, user):
        """Return the nick-prefix character ("@", "+", "%") for `user`."""
        record = user.rec(self)
        if (record.uc_modes & 1) > 0:   # bit 0: channel operator
            return "@"
        if (record.uc_modes & 2) > 0:   # bit 1: voiced
            return "+"
        if (record.uc_modes & 4) > 0:   # bit 2: half-op
            return "%"
        return ""

    def status(self, user):
        """Return a numeric rank for `user` (4 op, 2 half-op, 1 voice, 0)."""
        record = user.rec(self)
        if (record.uc_modes & 1) > 0:
            return 4
        if (record.uc_modes & 2) > 0:
            return 1
        if (record.uc_modes & 4) > 0:
            return 2
        return 0

    def remove(self, user, reason=None):
        """Part `user` from this channel, broadcasting PART to members.

        Deletes the channel from the registry once empty.
        """
        # NOTE(review): `created` is assigned but never used.
        created = 0
        record = user.rec(self)
        if reason:
            user.send_channel(self, "PART %s :%s" % (self.name, reason))
        else:
            user.send_channel(self, "PART :%s" % self.name)
        user.chans.remove(record)
        if not len(self.users()):
            del Channel.channels[self.name]

    def users(self):
        """Return the list of User objects currently in this channel."""
        inchannel = []
        for user in User.users.values():
            if user in self:
                inchannel.append(user)
        return inchannel


class UserRecord(object):
    """Per-(user, channel) membership record holding channel-user modes."""

    def __init__(self, channel, created=False):
        # TODO actually implement this
        self.channel = channel
        # Creator of a channel starts as op (bit 0); everyone else unmodded.
        self.uc_modes = 1 if created else 0


class User(object):
    """A connected IRC client: registration state, modes and channel list.

    WARNING: the constructor runs the client's blocking read loop and does
    not return until the connection ends (see __init__).
    """

    # id -> User registry of every connected client.
    users = {}
    # lowercase nickname -> set of user ids holding that nick.
    nicktable = {}

    @classmethod
    def add_client(cls, socket, addr):
        id = uuid4()
        while id in cls.users:
            id = uuid4()
        # NOTE(review): the created User is never returned (and the
        # constructor blocks in its read loop until disconnect).
        user = User(id, socket, addr)

    @classmethod
    def get_by_id(cls, id):
        return cls.users.get(id)

    @classmethod
    def get_by_nick(cls, nick):
        # Returns an (possibly empty) iterable of User objects.
        ids = cls.nicktable.get(nick.lower())
        if not ids:
            return set()
        return map(lambda id: cls.users.get(id), ids)

    @classmethod
    def same_nick(cls, a, b):
        # Nicknames compare case-insensitively.
        return a.lower() == b.lower()

    @classmethod
    def send_opers(cls, message):
        # Server notice to every oper (+o) with server notices (+s) enabled.
        for user in cls.users.values():
            if "o" in user.modes and "s" in user.modes:
                user.send_serv("NOTICE %s :%s" % (user.nickname, message))

    def __init__(self, id, socket, addr):
        self.id = id
        self._nickname = None
        self.ident = None
        self.registered = 0
        self.host, self.port = addr
        self.lastactive = time.time()
        self.lastping = 1
        self.modes = ""
        self.chans = []                # list of UserRecord
        self.socket = socket
        self.__invites = []
        self.quit = False
        User.users[id] = self
        # Blocking per-connection read loop: reads one line at a time
        # (byte-by-byte up to BUFFER_SIZE) and dispatches it as a command.
        while not self.quit:
            try:
                buffer = ""
                while len(buffer) < BUFFER_SIZE:
                    char = self.socket.recv(1).decode("utf-8")
                    if char == "\n":
                        break
                    buffer += char
                # if not buffer:
                #     del self
                #     break
                ircd.logger.info("<< %s: %s" % (self.id, buffer))
                ircd.commands.Command.process_command(self, buffer)
            except IOError as e:
                if e.errno == 32:      # EPIPE: peer closed the connection
                    ircd.logger.error("Pipe was broken.")
                    self.socket.close()
            except Exception as e:
                # Keep the connection alive on command errors; log traceback.
                ircd.logger.error("Something bad happened:\n%s"
                                  % traceback.format_exc())

    def auth(self):
        # TODO actually implement authentication
        return True

    def connect(self):
        """
        Actually connect the user to the network, displays the MOTD, etc.
        :return: Nothing
        """
        self.registered = 7
        self.lastactive = time.time()
        if not self.auth():
            # NOTE(review): sends ERROR but does not disconnect or return;
            # the welcome burst below is still sent.
            self.send("ERROR :Closing link: Invalid password.")
        self.send_serv("NOTICE Auth :Welcome to \002XinIRC Network\002!")
        self.send_serv("001 %s :Welcome to the XinIRC Network %s!%s@%s." % (
            self.nickname, self.nickname, self.ident, self.host))
        self.send_serv("002 %s :Your host is %s, running version %s."
                       % (self.nickname, ircd.config.getopt("SERVER_NAME"),
                          VERSION))
        self.send_serv("003 %s :This server was created at %s."
                       % (self.nickname, ircd.config.getopt("CREATION")))
        self.motd()
        User.send_opers("*** Client connecting on port %d: %s!%s@%s" % (
            self.port, self.nickname, self.ident, self.host))

    def join(self, name, key=None):
        """Join (or create) channel `name` and send the join burst."""
        # created: 0 unknown, 1 pre-existing channel, 2 newly created here.
        created = 0
        channel = Channel.get(name)
        ircd.logger.debug(
            "Joining channel %s (with key %s)" % (name, repr(key)))
        if channel:
            if self in channel:
                return
            created = 1
        else:
            channel = Channel(name, self)
            Channel.channels[name] = channel
            created = 2
        if len(self.chans) == ircd.config.getopt("MAX_CHANNEL"):
            ircd.logger.debug("User channel maximum exceeded: %s %s"
                              % (self.nickname, channel.name))
            self.send_serv("405 %s %s :You are on too many channels." % (
                self.nickname, channel.name))
            return
        # Creator of a brand-new channel gets ops via UserRecord.
        rec = UserRecord(channel, created=(created == 2))
        self.chans.append(rec)
        self.send_channel(channel, "JOIN :%s" % channel.name)
        if channel.topicset:
            self.send_serv("332 %s %s :%s"
                           % (self.nickname, channel.name, channel.topic))
            self.send_serv("333 %s %s %s %d" % (
                self.nickname, channel.name, channel.setby,
                channel.topicset))
        self.userlist(channel)
        self.send_serv("366 %s %s :End of /NAMES list."
                       % (self.nickname, channel.name))
        self.send_serv("324 %s %s +%s"
                       % (self.nickname, channel.name, channel.modes))
        self.send_serv("329 %s %s %d"
                       % (self.nickname, channel.name, channel.created))

    def motd(self):
        """Send the message of the day (numerics 375/372/376, or 422)."""
        if not ircd.config.getopt("MOTD_FILE"):
            self.send_serv(
                "422 %s :Message of the day file is missing." % self.nickname)
            return
        # NOTE(review): file handle is never closed; use `with open(...)`.
        motd = open(ircd.config.getopt("MOTD_FILE"), "r")
        self.send_serv("375 %s :- %s message of the day"
                       % (self.nickname, ircd.config.getopt("SERVER_NAME")))
        for line in motd:
            line = line.strip("\n")
            self.send_serv(("372 %s :- %s") % (self.nickname, line))
        self.send_serv("376 %s :End of %s message of the day."
                       % (self.nickname, ircd.config.getopt("SERVER_NAME")))

    def rec(self, channel):
        """Return this user's UserRecord for `channel` (None if absent)."""
        for record in self.chans:
            if record.channel.name == channel.name:
                return record

    def send(self, line):
        # Raw write of one protocol line (newline appended).
        ircd.logger.info(">> %s" % line)
        self.socket.sendall(bytes("%s\n" % line, encoding="utf-8"))

    def send_channel(self, channel, line, exclude=False):
        """Send `line` (prefixed with this user's mask) to channel members.

        With exclude=True this user is skipped.
        """
        for user in channel.users():
            if exclude and user.id == self.id:
                continue
            self.send_to(user, line)

    def send_common(self, line, exclude=False):
        """Send `line` once to every user sharing a channel with us."""
        # NOTE(review): map() returns a one-shot iterator in Python 3; the
        # `in my_channels` test below exhausts it after the first user, so
        # subsequent users are never matched. Should be a list/set.
        my_channels = map(lambda rec: rec.channel.name, self.chans)
        if not exclude:
            self.send_from(self, line)
        for user in User.users.values():
            for record in user.chans:
                if record.channel.name in my_channels:
                    user.send_from(self, line)
                    break

    def send_from(self, user, line):
        # Deliver to self, prefixed with `user`'s nick!ident@host mask.
        self.send(":%s!%s@%s %s"
                  % (user.nickname, user.ident, user.host, line))

    def send_to(self, dest, line):
        # Deliver to `dest`, prefixed with our own mask.
        dest.send_from(self, line)

    def send_serv(self, line):
        # Deliver a line prefixed with the server name.
        self.send(":%s %s" % (ircd.config.getopt("SERVER_NAME"), line))

    def userlist(self, channel):
        """Send the 353 NAMES reply lines for `channel`, chunked to fit."""
        users = "353 %s = %s :" % (self.nickname, channel.name)
        for user in User.users.values():
            if user in channel:
                # TODO +i user doesn't show up in list
                users += channel.mode(user)
                users += user.nickname
                users += " "
                # Flush before the line would exceed the protocol limit.
                if len(users) > 480 - ircd.config.getopt("NICK_LENGTH"):
                    self.send_serv(users)
                    users = "353 %s = %s :" % (self.nickname, channel.name)
        if users[-1] != ":":
            self.send_serv(users)

    @property
    def nickname(self):
        return self._nickname

    @nickname.setter
    def nickname(self, value):
        # Drop our id from the old nick's bucket first (empty buckets are
        # removed from the table entirely).
        nickname = self._nickname.lower() if self._nickname else None
        if nickname in User.nicktable and self.id in User.nicktable[nickname]:
            User.nicktable[nickname].remove(self.id)
            if not User.nicktable[nickname]:
                del User.nicktable[nickname]
        if value:
            # NOTE(review): membership is tested with the raw `value` but the
            # table is keyed by `value.lower()`; a mixed-case nick therefore
            # always "misses" and re-assigns a fresh set, clobbering any
            # existing ids under that lowercase key. Should test
            # `value.lower() not in User.nicktable`.
            if value not in User.nicktable:
                User.nicktable[value.lower()] = set()
            User.nicktable[value.lower()].add(self.id)
        self._nickname = value

    def __del__(self):
        # Deregister the nick and close the socket on garbage collection.
        self.nickname = None
        ircd.logger.info(
            "Connection closed with %s:%s" % (self.host, self.port))
        self.socket.close()
# -*- coding: utf-8 -*-
'''
    flask.ext.login
    ---------------

    This module provides user session management for Flask. It lets you log
    your users in and out in a database-independent manner.

    :copyright: (c) 2011 by Matthew Frazier.
    :license: MIT/X11, see LICENSE for more details.
'''

__version_info__ = ('0', '2', '9')
__version__ = '.'.join(__version_info__)
__author__ = 'Matthew Frazier'
__license__ = 'MIT/X11'
__copyright__ = '(c) 2011 by Matthew Frazier'
__all__ = ['LoginManager']

from flask import (_request_ctx_stack, abort, current_app, flash, redirect,
                   request, session, url_for)
from flask.signals import Namespace
from werkzeug.local import LocalProxy
# NOTE(review): safe_str_cmp was removed in Werkzeug 2.1; the stdlib
# hmac.compare_digest is the drop-in replacement — confirm the supported
# Werkzeug version range before upgrading.
from werkzeug.security import safe_str_cmp
from werkzeug.urls import url_decode, url_encode

from datetime import datetime, timedelta
from functools import wraps
from hashlib import sha1, md5
import hmac
import warnings
import sys

# NOTE(review): lexicographic string comparison; works for '2.x' vs '3' but
# would misclassify a hypothetical '10.x'.
if sys.version < '3':  # pragma: no cover
    from urlparse import urlparse, urlunparse
else:  # pragma: no cover
    from urllib.parse import urlparse, urlunparse
    unicode = str

_signals = Namespace()


#: A proxy for the current user. If no user is logged in, this will be an
#: anonymous user
current_user = LocalProxy(lambda: _get_user())

#: The default name of the "remember me" cookie (``remember_token``)
COOKIE_NAME = 'remember_token'

#: The default time before the "remember me" cookie expires (365 days).
COOKIE_DURATION = timedelta(days=365)

#: Whether the "remember me" cookie requires Secure; defaults to ``None``
COOKIE_SECURE = None

#: Whether the "remember me" cookie uses HttpOnly or not; defaults to ``False``
COOKIE_HTTPONLY = False

#: The default flash message to display when users need to log in.
LOGIN_MESSAGE = u'Please log in to access this page.'

#: The default flash message category to display when users need to log in.
LOGIN_MESSAGE_CATEGORY = 'message'

#: The default flash message to display when users need to reauthenticate.
REFRESH_MESSAGE = u'Please reauthenticate to access this page.'

#: The default flash message category to display when users need to
#: reauthenticate.
REFRESH_MESSAGE_CATEGORY = 'message'

#: The default attribute to retreive the unicode id of the user
ID_ATTRIBUTE = 'get_id'

#: Default name of the auth header (``Authorization``)
AUTH_HEADER_NAME = 'Authorization'


class LoginManager(object):
    '''
    This object is used to hold the settings used for logging in. Instances of
    :class:`LoginManager` are *not* bound to specific apps, so you can create
    one in the main body of your code and then bind it to your
    app in a factory function.
    '''
    def __init__(self, app=None, add_context_processor=True):
        #: A class or factory function that produces an anonymous user, which
        #: is used when no one is logged in.
        self.anonymous_user = AnonymousUserMixin

        #: The name of the view to redirect to when the user needs to log in.
        #: (This can be an absolute URL as well, if your authentication
        #: machinery is external to your application.)
        self.login_view = None

        #: The message to flash when a user is redirected to the login page.
        self.login_message = LOGIN_MESSAGE

        #: The message category to flash when a user is redirected to the login
        #: page.
        self.login_message_category = LOGIN_MESSAGE_CATEGORY

        #: The name of the view to redirect to when the user needs to
        #: reauthenticate.
        self.refresh_view = None

        #: The message to flash when a user is redirected to the 'needs
        #: refresh' page.
        self.needs_refresh_message = REFRESH_MESSAGE

        #: The message category to flash when a user is redirected to the
        #: 'needs refresh' page.
        self.needs_refresh_message_category = REFRESH_MESSAGE_CATEGORY

        #: The mode to use session protection in. This can be either
        #: ``'basic'`` (the default) or ``'strong'``, or ``None`` to disable
        #: it.
        self.session_protection = 'basic'

        #: If present, used to translate flash messages ``self.login_message``
        #: and ``self.needs_refresh_message``
        self.localize_callback = None

        self.token_callback = None

        self.user_callback = None

        self.unauthorized_callback = None

        self.needs_refresh_callback = None

        self.id_attribute = ID_ATTRIBUTE

        self.header_callback = None

        if app is not None:
            self.init_app(app, add_context_processor)

    def setup_app(self, app, add_context_processor=True):  # pragma: no cover
        '''
        This method has been deprecated. Please use
        :meth:`LoginManager.init_app` instead.
        '''
        warnings.warn('Warning setup_app is deprecated. Please use init_app.',
                      DeprecationWarning)
        self.init_app(app, add_context_processor)

    def init_app(self, app, add_context_processor=True):
        '''
        Configures an application. This registers an `after_request` call, and
        attaches this `LoginManager` to it as `app.login_manager`.

        :param app: The :class:`flask.Flask` object to configure.
        :type app: :class:`flask.Flask`
        :param add_context_processor: Whether to add a context processor to
            the app that adds a `current_user` variable to the template.
            Defaults to ``True``.
        :type add_context_processor: bool
        '''
        app.login_manager = self
        app.after_request(self._update_remember_cookie)

        # LOGIN_DISABLED defaults to the TESTING flag so unit tests can
        # bypass authentication without extra configuration.
        self._login_disabled = app.config.get('LOGIN_DISABLED',
                                              app.config.get('TESTING', False))

        if add_context_processor:
            app.context_processor(_user_context_processor)

    def unauthorized(self):
        '''
        This is called when the user is required to log in. If you register a
        callback with :meth:`LoginManager.unauthorized_handler`, then it will
        be called. Otherwise, it will take the following actions:

            - Flash :attr:`LoginManager.login_message` to the user.

            - Redirect the user to `login_view`. (The page they were attempting
              to access will be passed in the ``next`` query string variable,
              so you can redirect there if present instead of the homepage.)

        If :attr:`LoginManager.login_view` is not defined, then it will simply
        raise a HTTP 401 (Unauthorized) error instead.

        This should be returned from a view or before/after_request function,
        otherwise the redirect will have no effect.
        '''
        user_unauthorized.send(current_app._get_current_object())

        if self.unauthorized_callback:
            return self.unauthorized_callback()

        if not self.login_view:
            abort(401)

        if self.login_message:
            if self.localize_callback is not None:
                flash(self.localize_callback(self.login_message),
                      category=self.login_message_category)
            else:
                flash(self.login_message, category=self.login_message_category)

        return redirect(login_url(self.login_view, request.url))

    def user_loader(self, callback):
        '''
        This sets the callback for reloading a user from the session. The
        function you set should take a user ID (a ``unicode``) and return a
        user object, or ``None`` if the user does not exist.

        :param callback: The callback for retrieving a user object.
        :type callback: unicode
        '''
        self.user_callback = callback
        return callback

    def header_loader(self, callback):
        '''
        This sets the callback for loading a user from a header value.
        The function you set should take an authentication token and
        return a user object, or `None` if the user does not exist.

        :param callback: The callback for retrieving a user object.
        '''
        self.header_callback = callback
        return callback

    def token_loader(self, callback):
        '''
        This sets the callback for loading a user from an authentication
        token. The function you set should take an authentication token
        (a ``unicode``, as returned by a user's `get_auth_token` method) and
        return a user object, or ``None`` if the user does not exist.

        :param callback: The callback for retrieving a user object.
        :type callback: unicode
        '''
        self.token_callback = callback
        return callback

    def unauthorized_handler(self, callback):
        '''
        This will set the callback for the `unauthorized` method, which among
        other things is used by `login_required`. It takes no arguments, and
        should return a response to be sent to the user instead of their
        normal view.

        :param callback: The callback for unauthorized users.
        :type callback: function
        '''
        self.unauthorized_callback = callback
        return callback

    def needs_refresh_handler(self, callback):
        '''
        This will set the callback for the `needs_refresh` method, which among
        other things is used by `fresh_login_required`. It takes no arguments,
        and should return a response to be sent to the user instead of their
        normal view.

        :param callback: The callback for unauthorized users.
        :type callback: function
        '''
        self.needs_refresh_callback = callback
        return callback

    def needs_refresh(self):
        '''
        This is called when the user is logged in, but they need to be
        reauthenticated because their session is stale. If you register a
        callback with `needs_refresh_handler`, then it will be called.
        Otherwise, it will take the following actions:

            - Flash :attr:`LoginManager.needs_refresh_message` to the user.

            - Redirect the user to :attr:`LoginManager.refresh_view`. (The page
              they were attempting to access will be passed in the ``next``
              query string variable, so you can redirect there if present
              instead of the homepage.)

        If :attr:`LoginManager.refresh_view` is not defined, then it will
        simply raise a HTTP 403 (Forbidden) error instead.

        This should be returned from a view or before/after_request function,
        otherwise the redirect will have no effect.
        '''
        user_needs_refresh.send(current_app._get_current_object())

        if self.needs_refresh_callback:
            return self.needs_refresh_callback()

        if not self.refresh_view:
            abort(403)

        if self.localize_callback is not None:
            flash(self.localize_callback(self.needs_refresh_message),
                  category=self.needs_refresh_message_category)
        else:
            flash(self.needs_refresh_message,
                  category=self.needs_refresh_message_category)

        return redirect(login_url(self.refresh_view, request.url))

    def reload_user(self, user=None):
        # Populate ctx.user from the session (via user_callback) or from the
        # explicitly supplied `user`; falls back to the anonymous user.
        ctx = _request_ctx_stack.top

        if user is None:
            user_id = session.get('user_id')
            if user_id is None:
                ctx.user = self.anonymous_user()
            else:
                user = self.user_callback(user_id)
                if user is None:
                    # Stale session id: the loader no longer knows the user.
                    logout_user()
                else:
                    ctx.user = user
        else:
            ctx.user = user

    def _load_user(self):
        '''Loads user from session or remember_me cookie as applicable'''
        user_accessed.send(current_app._get_current_object())

        # first check SESSION_PROTECTION
        config = current_app.config
        if config.get('SESSION_PROTECTION', self.session_protection):
            deleted = self._session_protection()
            if deleted:
                return self.reload_user()

        # If a remember cookie is set, and the session is not, move the
        # cookie user ID to the session.
        #
        # However, the session may have been set if the user has been
        # logged out on this request, 'remember' would be set to clear,
        # so we should check for that and not restore the session.
        is_missing_user_id = 'user_id' not in session
        if is_missing_user_id:
            cookie_name = config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME)
            header_name = config.get('AUTH_HEADER_NAME', AUTH_HEADER_NAME)
            has_cookie = (cookie_name in request.cookies and
                          session.get('remember') != 'clear')
            if has_cookie:
                return self._load_from_cookie(request.cookies[cookie_name])
            elif header_name in request.headers:
                return self._load_from_header(request.headers[header_name])

        return self.reload_user()

    def _session_protection(self):
        sess = session._get_current_object()
        ident = _create_identifier()

        app = current_app._get_current_object()
        mode = app.config.get('SESSION_PROTECTION', self.session_protection)

        # if there is no '_id', that should just count as miss?
        # if '_id' not in sess:
        #     sess['_id'] = ident

        # if the sess is empty, it's an anonymous user, or just logged out
        # so we can skip this, unless 'strong' protection is active,
        # in which case we need to double check for the remember me token
        check_protection = sess or mode == 'strong'

        if check_protection and ident != sess.get('_id', None):
            if mode == 'basic' or sess.permanent:
                sess['_fresh'] = False
                session_protected.send(app)
                return False
            elif mode == 'strong':
                sess.clear()
                sess['remember'] = 'clear'
                session_protected.send(app)
                return True

        return False

    def _load_from_cookie(self, cookie):
        if self.token_callback:
            user = self.token_callback(cookie)
            if user is not None:
                session['user_id'] = getattr(user, self.id_attribute)()
                # Cookie-restored sessions are never "fresh".
                session['_fresh'] = False
                _request_ctx_stack.top.user = user
            else:
                self.reload_user()
        else:
            user_id = decode_cookie(cookie)
            if user_id is not None:
                session['user_id'] = user_id
                session['_fresh'] = False
            self.reload_user()

        if _request_ctx_stack.top.user is not None:
            app = current_app._get_current_object()
            user_loaded_from_cookie.send(app, user=_get_user())

    def _load_from_header(self, header):
        user = None
        if self.header_callback:
            user = self.header_callback(header)
        if user is not None:
            self.reload_user(user=user)
            app = current_app._get_current_object()
            user_loaded_from_header.send(app, user=_get_user())
        else:
            self.reload_user()

    def _update_remember_cookie(self, response):
        # Don't modify the session unless there's something to do.
        if 'remember' in session:
            operation = session.pop('remember', None)

            if operation == 'set' and 'user_id' in session:
                self._set_cookie(response)
            elif operation == 'clear':
                self._clear_cookie(response)

        return response

    def _set_cookie(self, response):
        # cookie settings
        config = current_app.config
        cookie_name = config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME)
        duration = config.get('REMEMBER_COOKIE_DURATION', COOKIE_DURATION)
        domain = config.get('REMEMBER_COOKIE_DOMAIN')

        secure = config.get('REMEMBER_COOKIE_SECURE', COOKIE_SECURE)
        httponly = config.get('REMEMBER_COOKIE_HTTPONLY', COOKIE_HTTPONLY)

        # prepare data
        if self.token_callback:
            data = current_user.get_auth_token()
        else:
            data = encode_cookie(str(session['user_id']))
        expires = datetime.utcnow() + duration

        # actually set it
        response.set_cookie(cookie_name,
                            value=data,
                            expires=expires,
                            domain=domain,
                            secure=secure,
                            httponly=httponly)

    def _clear_cookie(self, response):
        config = current_app.config
        cookie_name = config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME)
        domain = config.get('REMEMBER_COOKIE_DOMAIN')
        response.delete_cookie(cookie_name, domain=domain)


class UserMixin(object):
    '''
    This provides default implementations for the methods that Flask-Login
    expects user objects to have.
    '''

    def is_active(self):
        return True

    def is_authenticated(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        try:
            return unicode(self.id)
        except AttributeError:
            raise NotImplementedError('No `id` attribute - override `get_id`')

    def __eq__(self, other):
        '''
        Checks the equality of two `UserMixin` objects using `get_id`.
        '''
        if isinstance(other, UserMixin):
            return self.get_id() == other.get_id()
        return NotImplemented

    def __ne__(self, other):
        '''
        Checks the inequality of two `UserMixin` objects using `get_id`.
        '''
        equal = self.__eq__(other)
        if equal is NotImplemented:
            return NotImplemented
        return not equal

    if sys.version_info[0] != 2:  # pragma: no cover
        # Python 3 implicitly set __hash__ to None if we override __eq__
        # We set it back to its default implementation
        __hash__ = object.__hash__


class AnonymousUserMixin(object):
    '''
    This is the default object for representing an anonymous user.
    '''
    def is_authenticated(self):
        return False

    def is_active(self):
        return False

    def is_anonymous(self):
        return True

    def get_id(self):
        return


def encode_cookie(payload):
    '''
    This will encode a ``unicode`` value into a cookie, and sign that cookie
    with the app's secret key.

    :param payload: The value to encode, as `unicode`.
    :type payload: unicode
    '''
    return u'{0}|{1}'.format(payload, _cookie_digest(payload))


def decode_cookie(cookie):
    '''
    This decodes a cookie given by `encode_cookie`. If verification of the
    cookie fails, ``None`` will be implicitly returned.

    :param cookie: An encoded cookie.
    :type cookie: str
    '''
    try:
        payload, digest = cookie.rsplit(u'|', 1)
        if hasattr(digest, 'decode'):
            digest = digest.decode('ascii')
    except ValueError:
        return

    # Constant-time comparison prevents timing attacks on the HMAC digest.
    if safe_str_cmp(_cookie_digest(payload), digest):
        return payload


def make_next_param(login_url, current_url):
    '''
    Reduces the scheme and host from a given URL so it can be passed to
    the given `login` URL more efficiently.

    :param login_url: The login URL being redirected to.
    :type login_url: str
    :param current_url: The URL to reduce.
    :type current_url: str
    '''
    l = urlparse(login_url)
    c = urlparse(current_url)

    if (not l.scheme or l.scheme == c.scheme) and \
            (not l.netloc or l.netloc == c.netloc):
        return urlunparse(('', '', c.path, c.params, c.query, ''))
    return current_url


def login_url(login_view, next_url=None, next_field='next'):
    '''
    Creates a URL for redirecting to a login page. If only `login_view` is
    provided, this will just return the URL for it. If `next_url` is provided,
    however, this will append a ``next=URL`` parameter to the query string
    so that the login view can redirect back to that URL.

    :param login_view: The name of the login view. (Alternately, the actual
                       URL to the login view.)
    :type login_view: str
    :param next_url: The URL to give the login view for redirection.
    :type next_url: str
    :param next_field: What field to store the next URL in. (It defaults to
                       ``next``.)
    :type next_field: str
    '''
    if login_view.startswith(('https://', 'http://', '/')):
        base = login_view
    else:
        base = url_for(login_view)

    if next_url is None:
        return base

    parts = list(urlparse(base))
    md = url_decode(parts[4])
    md[next_field] = make_next_param(base, next_url)
    parts[4] = url_encode(md, sort=True)
    return urlunparse(parts)


def make_secure_token(*args, **options):
    '''
    This will create a secure token that you can use as an authentication
    token for your users. It uses heavy-duty HMAC encryption to prevent people
    from guessing the information. (To make it even more effective, if you
    will never need to regenerate the token, you can  pass some random data
    as one of the arguments.)

    :param \*args: The data to include in the token.
    :type args: args
    :param \*\*options: To manually specify a secret key, pass ``key=THE_KEY``.
        Otherwise, the ``current_app`` secret key will be used.
    :type \*\*options: kwargs
    '''
    key = options.get('key')
    key = _secret_key(key)

    l = [s if isinstance(s, bytes) else s.encode('utf-8') for s in args]

    payload = b'\0'.join(l)

    token_value = hmac.new(key, payload, sha1).hexdigest()

    if hasattr(token_value, 'decode'):  # pragma: no cover
        token_value = token_value.decode('utf-8')  # ensure bytes

    return token_value


def login_fresh():
    '''
    This returns ``True`` if the current login is fresh.
    '''
    return session.get('_fresh', False)


def login_user(user, remember=False, force=False):
    '''
    Logs a user in. You should pass the actual user object to this. If the
    user's `is_active` method returns ``False``, they will not be logged in
    unless `force` is ``True``.

    This will return ``True`` if the log in attempt succeeds, and ``False`` if
    it fails (i.e. because the user is inactive).

    :param user: The user object to log in.
    :type user: object
    :param remember: Whether to remember the user after their session expires.
        Defaults to ``False``.
    :type remember: bool
    :param force: If the user is inactive, setting this to ``True`` will log
        them in regardless. Defaults to ``False``.
    :type force: bool
    '''
    if not force and not user.is_active():
        return False

    user_id = getattr(user, current_app.login_manager.id_attribute)()
    session['user_id'] = user_id
    session['_fresh'] = True
    session['_id'] = _create_identifier()

    if remember:
        # Actual cookie write happens in _update_remember_cookie after the
        # response is built.
        session['remember'] = 'set'

    _request_ctx_stack.top.user = user
    user_logged_in.send(current_app._get_current_object(), user=_get_user())
    return True


def logout_user():
    '''
    Logs a user out. (You do not need to pass the actual user.) This will
    also clean up the remember me cookie if it exists.
    '''
    if 'user_id' in session:
        session.pop('user_id')

    if '_fresh' in session:
        session.pop('_fresh')

    cookie_name = current_app.config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME)
    if cookie_name in request.cookies:
        # Schedule cookie deletion for the response phase.
        session['remember'] = 'clear'

    user = _get_user()
    if user and not user.is_anonymous():
        user_logged_out.send(current_app._get_current_object(), user=user)

    # Re-resolve the current user (now anonymous).
    current_app.login_manager.reload_user()
    return True


def confirm_login():
    '''
    This sets the current session as fresh. Sessions become stale when they
    are reloaded from a cookie.
    '''
    session['_fresh'] = True
    session['_id'] = _create_identifier()
    user_login_confirmed.send(current_app._get_current_object())


def login_required(func):
    '''
    If you decorate a view with this, it will ensure that the current user is
    logged in and authenticated before calling the actual view. (If they are
    not, it calls the :attr:`LoginManager.unauthorized` callback.) For
    example::

        @app.route('/post')
        @login_required
        def post():
            pass

    If there are only certain times you need to require that your user is
    logged in, you can do so with::

        if not current_user.is_authenticated():
            return current_app.login_manager.unauthorized()

    ...which is essentially the code that this function adds to your views.

    It can be convenient to globally turn off authentication when unit
    testing. To enable this, if either of the application
    configuration variables `LOGIN_DISABLED` or `TESTING` is set to
    `True`, this decorator will be ignored.

    :param func: The view function to decorate.
    :type func: function
    '''
    @wraps(func)
    def decorated_view(*args, **kwargs):
        if current_app.login_manager._login_disabled:
            return func(*args, **kwargs)
        elif not current_user.is_authenticated():
            return current_app.login_manager.unauthorized()
        return func(*args, **kwargs)
    return decorated_view


def fresh_login_required(func):
    '''
    If you decorate a view with this, it will ensure that the current user's
    login is fresh - i.e. there session was not restored from a 'remember me'
    cookie. Sensitive operations, like changing a password or e-mail, should
    be protected with this, to impede the efforts of cookie thieves.

    If the user is not authenticated, :meth:`LoginManager.unauthorized` is
    called as normal. If they are authenticated, but their session is not
    fresh, it will call :meth:`LoginManager.needs_refresh` instead. (In that
    case, you will need to provide a :attr:`LoginManager.refresh_view`.)

    Behaves identically to the :func:`login_required` decorator with respect
    to configutation variables.

    :param func: The view function to decorate.
    :type func: function
    '''
    @wraps(func)
    def decorated_view(*args, **kwargs):
        # Bypass all checks when authentication is globally disabled
        # (LOGIN_DISABLED / TESTING configuration).
        if current_app.login_manager._login_disabled:
            return func(*args, **kwargs)
        elif not current_user.is_authenticated():
            return current_app.login_manager.unauthorized()
        elif not login_fresh():
            # Authenticated, but the session was restored from a
            # remember-me cookie: require a fresh login instead.
            return current_app.login_manager.needs_refresh()
        return func(*args, **kwargs)
    return decorated_view


def _get_user():
    # Lazily load the user the first time it is accessed in this request
    # context; subsequent calls reuse the cached attribute.
    if not hasattr(_request_ctx_stack.top, 'user'):
        current_app.login_manager._load_user()

    return getattr(_request_ctx_stack.top, 'user', None)


def _cookie_digest(payload, key=None):
    # HMAC-SHA1 signature of the cookie payload, keyed with the app's
    # secret key (or an explicit override).
    key = _secret_key(key)

    return hmac.new(key, payload.encode('utf-8'), sha1).hexdigest()


def _get_remote_addr():
    # NOTE(review): X-Forwarded-For is client-controlled unless a trusted
    # reverse proxy strips/overrides it -- confirm the deployment does so
    # before relying on this for session protection.
    address = request.headers.get('X-Forwarded-For', request.remote_addr)
    if address is not None:
        address = address.encode('utf-8')
    return address


def _create_identifier():
    # Build a client fingerprint (remote address + user agent) used by
    # session protection to detect stolen sessions.
    user_agent = request.headers.get('User-Agent')
    if user_agent is not None:
        user_agent = user_agent.encode('utf-8')
    base = '{0}|{1}'.format(_get_remote_addr(), user_agent)
    if str is bytes:
        # Python 2 only: promote the byte string to unicode before hashing.
        base = unicode(base, 'utf-8', errors='replace')  # noqa
    h = md5()
    h.update(base.encode('utf8'))
    return h.hexdigest()


def _user_context_processor():
    # Template context processor: exposes ``current_user`` to templates.
    return dict(current_user=_get_user())


def _secret_key(key=None):
    # Fall back to the application's SECRET_KEY when no key is given.
    if key is None:
        key = current_app.config['SECRET_KEY']

    if isinstance(key, unicode):  # pragma: no cover
        key = key.encode('latin1')  # ensure bytes

    return key


# Signals

#: Sent when a user is logged in. In addition to the app (which is the
#: sender), it is passed `user`, which is the user being logged in.
user_logged_in = _signals.signal('logged-in')

#: Sent when a user is logged out. In addition to the app (which is the
#: sender), it is passed `user`, which is the user being logged out.
user_logged_out = _signals.signal('logged-out')

#: Sent when the user is loaded from the cookie. In addition to the app (which
#: is the sender), it is passed `user`, which is the user being reloaded.
user_loaded_from_cookie = _signals.signal('loaded-from-cookie')

#: Sent when the user is loaded from the header. In addition to the app (which
#: is the sender), it is passed `user`, which is the user being reloaded.
user_loaded_from_header = _signals.signal('loaded-from-header')

#: Sent when a user's login is confirmed, marking it as fresh. (It is not
#: called for a normal login.)
#: It receives no additional arguments besides the app.
user_login_confirmed = _signals.signal('login-confirmed')

#: Sent when the `unauthorized` method is called on a `LoginManager`. It
#: receives no additional arguments besides the app.
user_unauthorized = _signals.signal('unauthorized')

#: Sent when the `needs_refresh` method is called on a `LoginManager`. It
#: receives no additional arguments besides the app.
user_needs_refresh = _signals.signal('needs-refresh')

#: Sent whenever the user is accessed/loaded. It receives no additional
#: arguments besides the app.
user_accessed = _signals.signal('accessed')

#: Sent whenever session protection takes effect, and a session is either
#: marked non-fresh or deleted. It receives no additional arguments besides
#: the app.
session_protected = _signals.signal('session-protected')
from sympy import (symbols, sympify, Dummy, simplify, Equality, S, Interval, oo, EmptySet, Q) from sympy.logic.boolalg import ( And, Boolean, Equivalent, ITE, Implies, Nand, Nor, Not, Or, POSform, SOPform, Xor, conjuncts, disjuncts, distribute_or_over_and, distribute_and_over_or, eliminate_implications, is_nnf, is_cnf, is_dnf, simplify_logic, to_nnf, to_cnf, to_dnf, to_int_repr, bool_map, true, false, BooleanAtom, is_literal ) from sympy.utilities.pytest import raises, XFAIL from sympy.utilities import cartes A, B, C, D= symbols('A,B,C,D') def test_overloading(): """Test that |, & are overloaded as expected""" assert A & B == And(A, B) assert A | B == Or(A, B) assert (A & B) | C == Or(And(A, B), C) assert A >> B == Implies(A, B) assert A << B == Implies(B, A) assert ~A == Not(A) assert A ^ B == Xor(A, B) def test_And(): assert And() is true assert And(A) == A assert And(True) is true assert And(False) is false assert And(True, True ) is true assert And(True, False) is false assert And(False, False) is false assert And(True, A) == A assert And(False, A) is false assert And(True, True, True) is true assert And(True, True, A) == A assert And(True, False, A) is false assert And(2, A) == A assert And(2, 3) is true assert And(A < 1, A >= 1) is false e = A > 1 assert And(e, e.canonical) == e.canonical g, l, ge, le = A > B, B < A, A >= B, B <= A assert And(g, l, ge, le) == And(l, le) def test_Or(): assert Or() is false assert Or(A) == A assert Or(True) is true assert Or(False) is false assert Or(True, True ) is true assert Or(True, False) is true assert Or(False, False) is false assert Or(True, A) is true assert Or(False, A) == A assert Or(True, False, False) is true assert Or(True, False, A) is true assert Or(False, False, A) == A assert Or(2, A) is true assert Or(A < 1, A >= 1) is true e = A > 1 assert Or(e, e.canonical) == e g, l, ge, le = A > B, B < A, A >= B, B <= A assert Or(g, l, ge, le) == Or(g, ge) def test_Xor(): assert Xor() is false assert Xor(A) == A assert 
Xor(A, A) is false assert Xor(True, A, A) is true assert Xor(A, A, A, A, A) == A assert Xor(True, False, False, A, B) == ~Xor(A, B) assert Xor(True) is true assert Xor(False) is false assert Xor(True, True ) is false assert Xor(True, False) is true assert Xor(False, False) is false assert Xor(True, A) == ~A assert Xor(False, A) == A assert Xor(True, False, False) is true assert Xor(True, False, A) == ~A assert Xor(False, False, A) == A assert isinstance(Xor(A, B), Xor) assert Xor(A, B, Xor(C, D)) == Xor(A, B, C, D) assert Xor(A, B, Xor(B, C)) == Xor(A, C) assert Xor(A < 1, A >= 1, B) == Xor(0, 1, B) == Xor(1, 0, B) e = A > 1 assert Xor(e, e.canonical) == Xor(0, 0) == Xor(1, 1) def test_Not(): raises(TypeError, lambda: Not(True, False)) assert Not(True) is false assert Not(False) is true assert Not(0) is true assert Not(1) is false assert Not(2) is false def test_Nand(): assert Nand() is false assert Nand(A) == ~A assert Nand(True) is false assert Nand(False) is true assert Nand(True, True ) is false assert Nand(True, False) is true assert Nand(False, False) is true assert Nand(True, A) == ~A assert Nand(False, A) is true assert Nand(True, True, True) is false assert Nand(True, True, A) == ~A assert Nand(True, False, A) is true def test_Nor(): assert Nor() is true assert Nor(A) == ~A assert Nor(True) is false assert Nor(False) is true assert Nor(True, True ) is false assert Nor(True, False) is false assert Nor(False, False) is true assert Nor(True, A) is false assert Nor(False, A) == ~A assert Nor(True, True, True) is false assert Nor(True, True, A) is false assert Nor(True, False, A) is false def test_Implies(): raises(ValueError, lambda: Implies(A, B, C)) assert Implies(True, True) is true assert Implies(True, False) is false assert Implies(False, True) is true assert Implies(False, False) is true assert Implies(0, A) is true assert Implies(1, 1) is true assert Implies(1, 0) is false assert A >> B == B << A assert (A < 1) >> (A >= 1) == (A >= 1) assert (A < 1) >> 
(S(1) > A) is true assert A >> A is true def test_Equivalent(): assert Equivalent(A, B) == Equivalent(B, A) == Equivalent(A, B, A) assert Equivalent() is true assert Equivalent(A, A) == Equivalent(A) is true assert Equivalent(True, True) == Equivalent(False, False) is true assert Equivalent(True, False) == Equivalent(False, True) is false assert Equivalent(A, True) == A assert Equivalent(A, False) == Not(A) assert Equivalent(A, B, True) == A & B assert Equivalent(A, B, False) == ~A & ~B assert Equivalent(1, A) == A assert Equivalent(0, A) == Not(A) assert Equivalent(A, Equivalent(B, C)) != Equivalent(Equivalent(A, B), C) assert Equivalent(A < 1, A >= 1) is false assert Equivalent(A < 1, A >= 1, 0) is false assert Equivalent(A < 1, A >= 1, 1) is false assert Equivalent(A < 1, S(1) > A) == Equivalent(1, 1) == Equivalent(0, 0) def test_equals(): assert Not(Or(A, B)).equals( And(Not(A), Not(B)) ) is True assert Equivalent(A, B).equals((A >> B) & (B >> A)) is True assert ((A | ~B) & (~A | B)).equals((~A & ~B) | (A & B)) is True assert (A >> B).equals(~A >> ~B) is False assert (A >> (B >> A)).equals(A >> (C >> A)) is False raises(NotImplementedError, lambda: And(A, A < B).equals(And(A, B > A))) def test_simplification(): """ Test working of simplification methods. 
""" set1 = [[0, 0, 1], [0, 1, 1], [1, 0, 0], [1, 1, 0]] set2 = [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1]] from sympy.abc import w, x, y, z assert SOPform('xyz', set1) == Or(And(Not(x), z), And(Not(z), x)) assert Not(SOPform('xyz', set2)) == Not(Or(And(Not(x), Not(z)), And(x, z))) assert POSform('xyz', set1 + set2) is true assert SOPform('xyz', set1 + set2) is true assert SOPform([Dummy(), Dummy(), Dummy()], set1 + set2) is true minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]] dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]] assert ( SOPform('wxyz', minterms, dontcares) == Or(And(Not(w), z), And(y, z))) assert POSform('wxyz', minterms, dontcares) == And(Or(Not(w), y), z) # test simplification ans = And(A, Or(B, C)) assert simplify_logic('A & (B | C)') == ans assert simplify_logic('(A & B) | (A & C)') == ans assert simplify_logic(Implies(A, B)) == Or(Not(A), B) assert simplify_logic(Equivalent(A, B)) == \ Or(And(A, B), And(Not(A), Not(B))) assert simplify_logic(And(Equality(A, 2), C)) == And(Equality(A, 2), C) assert simplify_logic(And(Equality(A, 2), A)) == And(Equality(A, 2), A) assert simplify_logic(And(Equality(A, B), C)) == And(Equality(A, B), C) assert simplify_logic(Or(And(Equality(A, 3), B), And(Equality(A, 3), C))) \ == And(Equality(A, 3), Or(B, C)) e = And(A, x**2 - x) assert simplify_logic(e) == And(A, x*(x - 1)) assert simplify_logic(e, deep=False) == e # check input ans = SOPform('xy', [[1, 0]]) assert SOPform([x, y], [[1, 0]]) == ans assert POSform(['x', 'y'], [[1, 0]]) == ans raises(ValueError, lambda: SOPform('x', [[1]], [[1]])) assert SOPform('x', [[1]], [[0]]) is true assert SOPform('x', [[0]], [[1]]) is true assert SOPform('x', [], []) is false raises(ValueError, lambda: POSform('x', [[1]], [[1]])) assert POSform('x', [[1]], [[0]]) is true assert POSform('x', [[0]], [[1]]) is true assert POSform('x', [], []) is false # check working of simplify assert simplify('(A & B) | (A & C)') == sympify('And(A, Or(B, 
C))') assert simplify(And(x, Not(x))) == False assert simplify(Or(x, Not(x))) == True def test_bool_map(): """ Test working of bool_map function. """ minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]] from sympy.abc import a, b, c, w, x, y, z assert bool_map(Not(Not(a)), a) == (a, {a: a}) assert bool_map(SOPform(['w', 'x', 'y', 'z'], minterms), POSform(['w', 'x', 'y', 'z'], minterms)) == \ (And(Or(Not(w), y), Or(Not(x), y), z), {x: x, w: w, z: z, y: y}) assert bool_map(SOPform(['x', 'z', 'y'],[[1, 0, 1]]), SOPform(['a', 'b', 'c'],[[1, 0, 1]])) != False function1 = SOPform(['x','z','y'],[[1, 0, 1], [0, 0, 1]]) function2 = SOPform(['a','b','c'],[[1, 0, 1], [1, 0, 0]]) assert bool_map(function1, function2) == \ (function1, {y: a, z: b}) def test_bool_symbol(): """Test that mixing symbols with boolean values works as expected""" assert And(A, True) == A assert And(A, True, True) == A assert And(A, False) is false assert And(A, True, False) is false assert Or(A, True) is true assert Or(A, False) == A def test_subs(): assert (A & B).subs(A, True) == B assert (A & B).subs(A, False) is false assert (A & B).subs(B, True) == A assert (A & B).subs(B, False) is false assert (A & B).subs({A: True, B: True}) is true assert (A | B).subs(A, True) is true assert (A | B).subs(A, False) == B assert (A | B).subs(B, True) is true assert (A | B).subs(B, False) == A assert (A | B).subs({A: True, B: True}) is true """ we test for axioms of boolean algebra see http://en.wikipedia.org/wiki/Boolean_algebra_(structure) """ def test_commutative(): """Test for commutativity of And and Or""" A, B = map(Boolean, symbols('A,B')) assert A & B == B & A assert A | B == B | A def test_and_associativity(): """Test for associativity of And""" assert (A & B) & C == A & (B & C) def test_or_assicativity(): assert ((A | B) | C) == (A | (B | C)) def test_double_negation(): a = Boolean() assert ~(~a) == a # test methods def test_eliminate_implications(): from sympy.abc import A, 
B, C, D assert eliminate_implications(Implies(A, B, evaluate=False)) == (~A) | B assert eliminate_implications( A >> (C >> Not(B))) == Or(Or(Not(B), Not(C)), Not(A)) assert eliminate_implications(Equivalent(A, B, C, D)) == \ (~A | B) & (~B | C) & (~C | D) & (~D | A) def test_conjuncts(): assert conjuncts(A & B & C) == set([A, B, C]) assert conjuncts((A | B) & C) == set([A | B, C]) assert conjuncts(A) == set([A]) assert conjuncts(True) == set([True]) assert conjuncts(False) == set([False]) def test_disjuncts(): assert disjuncts(A | B | C) == set([A, B, C]) assert disjuncts((A | B) & C) == set([(A | B) & C]) assert disjuncts(A) == set([A]) assert disjuncts(True) == set([True]) assert disjuncts(False) == set([False]) def test_distribute(): assert distribute_and_over_or(Or(And(A, B), C)) == And(Or(A, C), Or(B, C)) assert distribute_or_over_and(And(A, Or(B, C))) == Or(And(A, B), And(A, C)) def test_to_nnf(): assert to_nnf(true) is true assert to_nnf(false) is false assert to_nnf(A) == A assert to_nnf(A | ~A | B) is true assert to_nnf(A & ~A & B) is false assert to_nnf(A >> B) == ~A | B assert to_nnf(Equivalent(A, B, C)) == (~A | B) & (~B | C) & (~C | A) assert to_nnf(A ^ B ^ C) == \ (A | B | C) & (~A | ~B | C) & (A | ~B | ~C) & (~A | B | ~C) assert to_nnf(ITE(A, B, C)) == (~A | B) & (A | C) assert to_nnf(Not(A | B | C)) == ~A & ~B & ~C assert to_nnf(Not(A & B & C)) == ~A | ~B | ~C assert to_nnf(Not(A >> B)) == A & ~B assert to_nnf(Not(Equivalent(A, B, C))) == And(Or(A, B, C), Or(~A, ~B, ~C)) assert to_nnf(Not(A ^ B ^ C)) == \ (~A | B | C) & (A | ~B | C) & (A | B | ~C) & (~A | ~B | ~C) assert to_nnf(Not(ITE(A, B, C))) == (~A | ~B) & (A | ~C) assert to_nnf((A >> B) ^ (B >> A)) == (A & ~B) | (~A & B) assert to_nnf((A >> B) ^ (B >> A), False) == \ (~A | ~B | A | B) & ((A & ~B) | (~A & B)) def test_to_cnf(): assert to_cnf(~(B | C)) == And(Not(B), Not(C)) assert to_cnf((A & B) | C) == And(Or(A, C), Or(B, C)) assert to_cnf(A >> B) == (~A) | B assert to_cnf(A >> (B & C)) == (~A 
| B) & (~A | C) assert to_cnf(A & (B | C) | ~A & (B | C), True) == B | C assert to_cnf(Equivalent(A, B)) == And(Or(A, Not(B)), Or(B, Not(A))) assert to_cnf(Equivalent(A, B & C)) == \ (~A | B) & (~A | C) & (~B | ~C | A) assert to_cnf(Equivalent(A, B | C), True) == \ And(Or(Not(B), A), Or(Not(C), A), Or(B, C, Not(A))) def test_to_dnf(): assert to_dnf(~(B | C)) == And(Not(B), Not(C)) assert to_dnf(A & (B | C)) == Or(And(A, B), And(A, C)) assert to_dnf(A >> B) == (~A) | B assert to_dnf(A >> (B & C)) == (~A) | (B & C) assert to_dnf(Equivalent(A, B), True) == \ Or(And(A, B), And(Not(A), Not(B))) assert to_dnf(Equivalent(A, B & C), True) == \ Or(And(A, B, C), And(Not(A), Not(B)), And(Not(A), Not(C))) def test_to_int_repr(): x, y, z = map(Boolean, symbols('x,y,z')) def sorted_recursive(arg): try: return sorted(sorted_recursive(x) for x in arg) except TypeError: # arg is not a sequence return arg assert sorted_recursive(to_int_repr([x | y, z | x], [x, y, z])) == \ sorted_recursive([[1, 2], [1, 3]]) assert sorted_recursive(to_int_repr([x | y, z | ~x], [x, y, z])) == \ sorted_recursive([[1, 2], [3, -1]]) def test_is_nnf(): from sympy.abc import A, B assert is_nnf(true) is True assert is_nnf(A) is True assert is_nnf(~A) is True assert is_nnf(A & B) is True assert is_nnf((A & B) | (~A & A) | (~B & B) | (~A & ~B), False) is True assert is_nnf((A | B) & (~A | ~B)) is True assert is_nnf(Not(Or(A, B))) is False assert is_nnf(A ^ B) is False assert is_nnf((A & B) | (~A & A) | (~B & B) | (~A & ~B), True) is False def test_is_cnf(): x, y, z = symbols('x,y,z') assert is_cnf(x) is True assert is_cnf(x | y | z) is True assert is_cnf(x & y & z) is True assert is_cnf((x | y) & z) is True assert is_cnf((x & y) | z) is False def test_is_dnf(): x, y, z = symbols('x,y,z') assert is_dnf(x) is True assert is_dnf(x | y | z) is True assert is_dnf(x & y & z) is True assert is_dnf((x & y) | z) is True assert is_dnf((x | y) & z) is False def test_ITE(): A, B, C = map(Boolean, symbols('A,B,C')) assert 
ITE(True, False, True) is false assert ITE(True, True, False) is true assert ITE(False, True, False) is false assert ITE(False, False, True) is true assert isinstance(ITE(A, B, C), ITE) A = True assert ITE(A, B, C) == B A = False assert ITE(A, B, C) == C B = True assert ITE(And(A, B), B, C) == C assert ITE(Or(A, False), And(B, True), False) is false def test_is_literal(): assert is_literal(True) is True assert is_literal(False) is True assert is_literal(A) is True assert is_literal(~A) is True assert is_literal(Or(A, B)) is False assert is_literal(Q.zero(A)) is True assert is_literal(Not(Q.zero(A))) is True assert is_literal(Or(A, B)) is False assert is_literal(And(Q.zero(A), Q.zero(B))) is False def test_operators(): # Mostly test __and__, __rand__, and so on assert True & A == A & True == A assert False & A == A & False == False assert A & B == And(A, B) assert True | A == A | True == True assert False | A == A | False == A assert A | B == Or(A, B) assert ~A == Not(A) assert True >> A == A << True == A assert False >> A == A << False == True assert A >> True == True << A == True assert A >> False == False << A == ~A assert A >> B == B << A == Implies(A, B) assert True ^ A == A ^ True == ~A assert False ^ A == A ^ False == A assert A ^ B == Xor(A, B) def test_true_false(): x = symbols('x') assert true is S.true assert false is S.false assert true is not True assert false is not False assert true assert not false assert true == True assert false == False assert not (true == False) assert not (false == True) assert not (true == false) assert hash(true) == hash(True) assert hash(false) == hash(False) assert len(set([true, True])) == len(set([false, False])) == 1 assert isinstance(true, BooleanAtom) assert isinstance(false, BooleanAtom) # We don't want to subclass from bool, because bool subclasses from # int. But operators like &, |, ^, <<, >>, and ~ act differently on 0 and # 1 then we want them to on true and false. See the docstrings of the # various And, Or, etc. 
functions for examples. assert not isinstance(true, bool) assert not isinstance(false, bool) # Note: using 'is' comparison is important here. We want these to return # true and false, not True and False assert Not(true) is false assert Not(True) is false assert Not(false) is true assert Not(False) is true assert ~true is false assert ~false is true for T, F in cartes([True, true], [False, false]): assert And(T, F) is false assert And(F, T) is false assert And(F, F) is false assert And(T, T) is true assert And(T, x) == x assert And(F, x) is false if not (T is True and F is False): assert T & F is false assert F & T is false if not F is False: assert F & F is false if not T is True: assert T & T is true assert Or(T, F) is true assert Or(F, T) is true assert Or(F, F) is false assert Or(T, T) is true assert Or(T, x) is true assert Or(F, x) == x if not (T is True and F is False): assert T | F is true assert F | T is true if not F is False: assert F | F is false if not T is True: assert T | T is true assert Xor(T, F) is true assert Xor(F, T) is true assert Xor(F, F) is false assert Xor(T, T) is false assert Xor(T, x) == ~x assert Xor(F, x) == x if not (T is True and F is False): assert T ^ F is true assert F ^ T is true if not F is False: assert F ^ F is false if not T is True: assert T ^ T is false assert Nand(T, F) is true assert Nand(F, T) is true assert Nand(F, F) is true assert Nand(T, T) is false assert Nand(T, x) == ~x assert Nand(F, x) is true assert Nor(T, F) is false assert Nor(F, T) is false assert Nor(F, F) is true assert Nor(T, T) is false assert Nor(T, x) is false assert Nor(F, x) == ~x assert Implies(T, F) is false assert Implies(F, T) is true assert Implies(F, F) is true assert Implies(T, T) is true assert Implies(T, x) == x assert Implies(F, x) is true assert Implies(x, T) is true assert Implies(x, F) == ~x if not (T is True and F is False): assert T >> F is false assert F << T is false assert F >> T is true assert T << F is true if not F is False: 
            assert F >> F is true
            assert F << F is true
        if not T is True:
            assert T >> T is true
            assert T << T is true

        assert Equivalent(T, F) is false
        assert Equivalent(F, T) is false
        assert Equivalent(F, F) is true
        assert Equivalent(T, T) is true
        assert Equivalent(T, x) == x
        assert Equivalent(F, x) == ~x
        assert Equivalent(x, T) == x
        assert Equivalent(x, F) == ~x

        assert ITE(T, T, T) is true
        assert ITE(T, T, F) is true
        assert ITE(T, F, T) is false
        assert ITE(T, F, F) is false
        assert ITE(F, T, T) is true
        assert ITE(F, T, F) is false
        assert ITE(F, F, T) is true
        assert ITE(F, F, F) is false


def test_bool_as_set():
    """Relational And/Or/Not over one real symbol convert to intervals."""
    x = symbols('x')

    assert And(x <= 2, x >= -2).as_set() == Interval(-2, 2)
    assert Or(x >= 2, x <= -2).as_set() == Interval(-oo, -2) + Interval(2, oo)
    assert Not(x > 2).as_set() == Interval(-oo, 2)
    assert true.as_set() == S.UniversalSet
    assert false.as_set() == EmptySet()


@XFAIL
def test_multivariate_bool_as_set():
    """Multivariate ``as_set`` (cross products of intervals); expected
    failure -- not implemented at the time this test was written."""
    x, y = symbols('x,y')

    assert And(x >= 0, y >= 0).as_set() == Interval(0, oo)*Interval(0, oo)
    assert Or(x >= 0, y >= 0).as_set() == S.Reals*S.Reals - \
        Interval(-oo, 0, True, True)*Interval(-oo, 0, True, True)


def test_all_or_nothing():
    """And/Or over the always-true bounds ``x >= -oo`` and ``x <= oo``."""
    x = symbols('x', real=True)
    args = x >= -oo, x <= oo
    v = And(*args)
    if v.func is And:
        # Any argument that evaluated to S.true is dropped from the And.
        assert len(v.args) == len(args) - args.count(S.true)
    else:
        assert v == True
    v = Or(*args)
    if v.func is Or:
        assert len(v.args) == 2
    else:
        assert v == True


def test_canonical_atoms():
    """The boolean atoms are their own canonical forms."""
    assert true.canonical == true
    assert false.canonical == false
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yapf.split_penalty."""

import sys
import textwrap
import unittest

from lib2to3 import pytree

from yapf.yapflib import pytree_utils
from yapf.yapflib import pytree_visitor
from yapf.yapflib import split_penalty

# Shorthands for the penalty constants under test.
UNBREAKABLE = split_penalty.UNBREAKABLE
STRONGLY_CONNECTED = split_penalty.STRONGLY_CONNECTED
CONTIGUOUS_LIST = split_penalty.CONTIGUOUS_LIST


class SplitPenaltyTest(unittest.TestCase):

  def _ParseAndComputePenalties(self, code, dumptree=False):
    """Parses the code and computes split penalties.

    Arguments:
      code: code to parse as a string
      dumptree: if True, the parsed pytree (after penalty assignment) is
        dumped to stderr. Useful for debugging.

    Returns:
      Parse tree.
    """
    tree = pytree_utils.ParseCodeToTree(code)
    split_penalty.ComputeSplitPenalties(tree)
    if dumptree:
      pytree_visitor.DumpPyTree(tree, target_stream=sys.stderr)
    return tree

  def _CheckPenalties(self, tree, list_of_expected):
    """Check that the tokens in the tree have the correct penalties.

    Args:
      tree: the pytree.
      list_of_expected: list of (name, penalty) pairs. Non-semantic tokens
        are filtered out from the expected values.
    """

    def FlattenRec(tree):
      # Skip non-semantic tokens (comments, whitespace, ...) entirely.
      if pytree_utils.NodeName(tree) in pytree_utils.NONSEMANTIC_TOKENS:
        return []
      if isinstance(tree, pytree.Leaf):
        # Pair each leaf token with its assigned split penalty (or None).
        return [(tree.value, pytree_utils.GetNodeAnnotation(
            tree, pytree_utils.Annotation.SPLIT_PENALTY))]
      nodes = []
      for node in tree.children:
        nodes += FlattenRec(node)
      return nodes

    self.assertEqual(list_of_expected, FlattenRec(tree))

  def testUnbreakable(self):
    # Test function definitions.
    code = textwrap.dedent(r"""
      def foo(x):
        pass
      """)
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('def', None),
        ('foo', UNBREAKABLE),
        ('(', UNBREAKABLE),
        ('x', None),
        (')', None),
        (':', UNBREAKABLE),
        ('pass', None),
    ])  # yapf: disable

    # Test function definition with trailing comment.
    code = textwrap.dedent(r"""
      def foo(x):  # trailing comment
        pass
      """)
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('def', None),
        ('foo', UNBREAKABLE),
        ('(', UNBREAKABLE),
        ('x', None),
        (')', None),
        (':', UNBREAKABLE),
        ('pass', None),
    ])  # yapf: disable

    # Test class definitions.
    code = textwrap.dedent(r"""
      class A:
        pass
      class B(A):
        pass
      """)
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('class', None),
        ('A', UNBREAKABLE),
        (':', UNBREAKABLE),
        ('pass', None),
        ('class', None),
        ('B', UNBREAKABLE),
        ('(', UNBREAKABLE),
        ('A', None),
        (')', None),
        (':', UNBREAKABLE),
        ('pass', None),
    ])  # yapf: disable

    # Test lambda definitions.
    code = textwrap.dedent(r"""
      lambda a, b: None
      """)
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('lambda', None),
        ('a', UNBREAKABLE),
        (',', UNBREAKABLE),
        ('b', UNBREAKABLE),
        (':', UNBREAKABLE),
        ('None', UNBREAKABLE),
    ])  # yapf: disable

    # Test dotted names.
    code = textwrap.dedent(r"""
      import a.b.c
      """)
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('import', None),
        ('a', None),
        ('.', UNBREAKABLE),
        ('b', UNBREAKABLE),
        ('.', UNBREAKABLE),
        ('c', UNBREAKABLE),
    ])  # yapf: disable

  def testStronglyConnected(self):
    # Test dictionary keys.
    code = textwrap.dedent(r"""
      a = {
          'x': 42,
          y(lambda a: 23): 37,
      }
      """)
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('a', None),
        ('=', None),
        ('{', None),
        ("'x'", STRONGLY_CONNECTED),
        (':', STRONGLY_CONNECTED),
        ('42', None),
        (',', None),
        ('y', STRONGLY_CONNECTED),
        ('(', UNBREAKABLE),
        ('lambda', STRONGLY_CONNECTED),
        ('a', UNBREAKABLE),
        (':', UNBREAKABLE),
        ('23', UNBREAKABLE),
        (')', UNBREAKABLE),
        (':', STRONGLY_CONNECTED),
        ('37', None),
        (',', None),
        ('}', None),
    ])  # yapf: disable

    # Test list comprehension.
    code = textwrap.dedent(r"""
      [a for a in foo if a.x == 37]
      """)
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('[', None),
        ('a', None),
        ('for', None),
        ('a', STRONGLY_CONNECTED),
        ('in', STRONGLY_CONNECTED),
        ('foo', STRONGLY_CONNECTED),
        ('if', 0),
        ('a', STRONGLY_CONNECTED),
        ('.', UNBREAKABLE),
        ('x', UNBREAKABLE),
        ('==', STRONGLY_CONNECTED),
        ('37', STRONGLY_CONNECTED),
        (']', None),
    ])  # yapf: disable

  def testFuncCalls(self):
    code = 'foo(1, 2, 3)\n'
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('foo', None),
        ('(', UNBREAKABLE),
        ('1', CONTIGUOUS_LIST),
        (',', CONTIGUOUS_LIST),
        ('2', CONTIGUOUS_LIST),
        (',', CONTIGUOUS_LIST),
        ('3', CONTIGUOUS_LIST),
        (')', UNBREAKABLE)])  # yapf: disable

    # Now a method call, which has more than one trailer
    code = 'foo.bar.baz(1, 2, 3)\n'
    tree = self._ParseAndComputePenalties(code)
    self._CheckPenalties(tree, [
        ('foo', None),
        ('.', UNBREAKABLE),
        ('bar', UNBREAKABLE),
        ('.', UNBREAKABLE),
        ('baz', UNBREAKABLE),
        ('(', UNBREAKABLE),
        ('1', CONTIGUOUS_LIST),
        (',', CONTIGUOUS_LIST),
        ('2', CONTIGUOUS_LIST),
        (',', CONTIGUOUS_LIST),
        ('3', CONTIGUOUS_LIST),
        (')', UNBREAKABLE)])  # yapf: disable


if __name__ == '__main__':
  unittest.main()
#### PATTERN | XX ################################################################################## # -*- coding: utf-8 -*- # Copyright (c) year, institute, country # Author: Name (e-mail) # License: BSD (see LICENSE.txt for details). # http://www.clips.ua.ac.be/pages/pattern #################################################################################################### # Template for pattern.xx, bundling natural language processing tools for language XXXXX. # The module bundles a shallow parser (part-of-speech tagger, chunker, lemmatizer) # with functions for word inflection (singularization, pluralization, conjugation) # and sentiment analysis. # Base classes for the parser, verb table and sentiment lexicon are inherited from pattern.text. # The parser can be subclassed with a custom tokenizer (finds sentence boundaries) # and lemmatizer (uses word inflection to find the base form of words). # The part-of-speech tagger requires a lexicon of tagged known words and rules for unknown words. # Tools for word inflection should be bundled in pattern.text.xx.inflect. import os import sys try: MODULE = os.path.dirname(os.path.abspath(__file__)) except: MODULE = "" sys.path.insert(0, os.path.join(MODULE, "..", "..", "..", "..")) # Import parser base classes. from pattern.text import ( Lexicon, Spelling, Parser as _Parser, ngrams, pprint, commandline, PUNCTUATION ) # Import parse tree base classes. from pattern.text.tree import ( Tree, Text, Sentence, Slice, Chunk, PNPChunk, Chink, Word, table, SLASH, WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA, AND, OR ) # Import sentiment analysis base classes. from pattern.text import ( Sentiment, NOUN, VERB, ADJECTIVE, ADVERB, MOOD, IRONY ) # Import verb tenses. from pattern.text import ( INFINITIVE, PRESENT, PAST, FUTURE, FIRST, SECOND, THIRD, SINGULAR, PLURAL, SG, PL, PROGRESSIVE, PARTICIPLE ) # Import inflection functions. 
from pattern.text.xx.inflect import (
    article, referenced, DEFINITE, INDEFINITE,
    pluralize, singularize, NOUN, VERB, ADJECTIVE,
    verbs, conjugate, lemma, lexeme, tenses,
    predicative, attributive
)
# Import all submodules.
from pattern.text.xx import inflect

sys.path.pop(0)

#--- PARSER ----------------------------------------------------------------------------------------
# Pattern uses the Penn Treebank II tagset (http://www.clips.ua.ac.be/pages/penn-treebank-tagset).
# The lexicon for pattern.xx may be using a different tagset (e.g., PAROLE, WOTAN).
# The following functions are meant to map the tags to Penn Treebank II, see Parser.find_chunks().

TAGSET = {"??": "NN"}  # pattern.xx tagset => Penn Treebank II.


def tagset2penntreebank(tag):
    """ Returns the Penn Treebank II tag for the given language-specific tag.
        Tags with no entry in TAGSET are passed through unchanged.
    """
    return TAGSET.get(tag, tag)

# Different languages have different contractions (e.g., English "I've" or French "j'ai")
# and abbreviations. The following definitions cover contractions and abbreviations
# for pattern.xx, see also Parser.find_tokens().

# Contraction => expansion, applied during tokenization.
REPLACEMENTS = {"'s": " 's", "'ve": " 've"}

# Abbreviations that end in "." but do not mark a sentence boundary.
ABBREVIATIONS = set(("e.g.", "etc.", "i.e."))

# A lemmatizer can be constructed if we have a pattern.xx.inflect,
# with functions for noun singularization and verb conjugation (i.e., infinitives).


def find_lemmata(tokens):
    """ Annotates the tokens with lemmata for plural nouns and conjugated verbs,
        where each token is a [word, part-of-speech] list.
        The lemma is appended in-place as a third element of each token,
        and the (mutated) token list is also returned.
    """
    for token in tokens:
        # Default lemma is the word itself (lowercased below).
        word, pos, lemma = token[0], token[1], token[0]
        if pos.startswith("JJ"):
            # Adjective => predicative form.
            lemma = predicative(word)
        if pos == "NNS":
            # Plural noun => singular.
            lemma = singularize(word)
        if pos.startswith(("VB", "MD")):
            # Verb or modal => infinitive; fall back to the word itself
            # when conjugate() returns a falsy value.
            lemma = conjugate(word, INFINITIVE) or word
        token.append(lemma.lower())
    return tokens


# Subclass the base parser with the language-specific functionality:
class Parser(_Parser):
    """ Language-specific parser: plugs the local tokenization rules,
        tagset mapping and lemmatizer into the pattern.text base Parser.
    """

    def find_tokens(self, tokens, **kwargs):
        # Inject the language-specific abbreviations and contractions,
        # unless the caller supplied their own.
        kwargs.setdefault("abbreviations", ABBREVIATIONS)
        kwargs.setdefault("replace", REPLACEMENTS)
        return _Parser.find_tokens(self, tokens, **kwargs)

    def find_tags(self, tokens, **kwargs):
        # Map language-specific tags to Penn Treebank II by default.
        kwargs.setdefault("map", tagset2penntreebank)
        return _Parser.find_tags(self, tokens, **kwargs)

    def find_chunks(self, tokens, **kwargs):
        # No language-specific chunking; delegate to the base parser.
        return _Parser.find_chunks(self, tokens, **kwargs)

    def find_lemmata(self, tokens, **kwargs):
        # Use the module-level lemmatizer defined above.
        return find_lemmata(tokens)

# The parser's part-of-speech tagger requires a lexicon of tagged known words,
# and rules for unknown words. See pattern.text.Morphology and pattern.text.Context
# for further details. A tutorial on how to acquire data for the lexicon is here:
# http://www.clips.ua.ac.be/pages/using-wiktionary-to-build-an-italian-part-of-speech-tagger

lexicon = Lexicon(
        path = os.path.join(MODULE, "xx-lexicon.txt"),
  morphology = os.path.join(MODULE, "xx-morphology.txt"),
     context = os.path.join(MODULE, "xx-context.txt"),
    entities = os.path.join(MODULE, "xx-entities.txt"),
    language = "xx"
)

# Create the parser with default tags for unknown words:
# (noun, proper noun, numeric).

parser = Parser(
     lexicon = lexicon,
     default = ("NN", "NNP", "CD"),
    language = "xx"
)

# Create the sentiment lexicon,
# see pattern/text/xx/xx-sentiment.xml for further details.
# We also need to define the tag for modifiers,
# words that modify the score of the following word
# (e.g., *very* good, *not good, ...)
sentiment = Sentiment(
        path = os.path.join(MODULE, "xx-sentiment.xml"),
      synset = None,
   negations = ("no", "not", "never"),
   modifiers = ("RB",),
    modifier = lambda w: w.endswith("ly"),  # brilliantly, hardly, partially, ...
    language = "xx"
)

# Nothing should be changed below.


def tokenize(s, *args, **kwargs):
    """ Returns a list of sentences, with punctuation marks split off the words. """
    return parser.find_tokens(s, *args, **kwargs)


def parse(s, *args, **kwargs):
    """ Returns a tagged Unicode string for the given input. """
    return parser.parse(s, *args, **kwargs)


def parsetree(s, *args, **kwargs):
    """ Returns a parsed Text for the given string. """
    tagged = parse(s, *args, **kwargs)
    return Text(tagged)


def split(s, token=[WORD, POS, CHUNK, PNP]):
    """ Returns a parsed Text for the given (already parsed) string,
        with the requested token format.
    """
    return Text(s, token)


def tag(s, tokenize=True, encoding="utf-8"):
    """ Returns a list of (token, tag)-tuples for the given string. """
    tagged = parse(s, tokenize, True, False, False, False, encoding)
    return [(w[0], w[1]) for sentence in tagged.split() for w in sentence]


def polarity(s, **kwargs):
    """ Returns the sentence polarity (positive/negative) between -1.0 and 1.0. """
    score = sentiment(s, **kwargs)
    return score[0]


def subjectivity(s, **kwargs):
    """ Returns the sentence subjectivity (objective/subjective) between 0.0 and 1.0. """
    score = sentiment(s, **kwargs)
    return score[1]


def positive(s, threshold=0.1, **kwargs):
    """ Returns True if the given sentence scores at or above the polarity threshold. """
    return polarity(s, **kwargs) >= threshold

# Example usage:
#   print parse("The happy cat sat on the mat.", lemmata=True)
#   print singularize("cats")
#   print polarity("very happy")

#---------------------------------------------------------------------------------------------------
# python -m pattern.xx xml -s "..." -OTCL

if __name__ == "__main__":
    commandline(parse)
""" .. module:: task_runners.task_runner :synopsis: Base TaskRunner class to use with DivePythonOperator .. moduleauthor:: Laura Lorenz <llorenz@industrydive.com> .. moduleauthor:: Miriam Sexton <miriam@industrydive.com> """ import datetime import json from fileflow.utils import read_and_clean_csv_to_dataframe, clean_and_write_dataframe_to_csv from fileflow.storage_drivers import get_storage_driver class TaskRunner(object): def __init__(self, context): # The upstream dependencies # These must always be specified # Dictionary can contain any number of keys which must be redirected in the business logic to their read/parse methods self.data_dependencies = context.pop('data_dependencies', {}) # The task instance. self.task_instance = context['ti'] self.date = context['execution_date'] # Picking a storage driver for this task instance. self.storage = get_storage_driver() def get_input_filename(self, data_dependency, dag_id=None): """ Generate the default input filename for a class. :param str data_dependency: Key for the target data_dependency in self.data_dependencies that you want to construct a filename for. :param str dag_id: Defaults to the current DAG id :return: File system path or S3 URL to the input file. :rtype: str """ if dag_id is None: dag_id = self.task_instance.dag_id task_id = self.data_dependencies[data_dependency] return self.storage.get_filename(dag_id, task_id, self.date) def get_output_filename(self): """ Generate the default output filename or S3 URL for this task instance. :return: File system path to output filename :rtype: str """ return self.storage.get_filename( self.task_instance.dag_id, self.task_instance.task_id, self.date ) def get_upstream_stream(self, data_dependency_key, dag_id=None): """ Returns a stream to the file that was output by a seperate task in the same dag. :param str data_dependency_key: The key (business logic name) for the upstream dependency. 
This will get the value from the self.data_dependencies dictionary to determine the file to read from. :param str dag_id: Defaults to the current DAG id. :param str encoding: The file encoding to use. Defaults to 'utf-8'. :return: stream to the file :rtype: stream """ if dag_id is None: dag_id = self.task_instance.dag_id task_id = self.data_dependencies[data_dependency_key] stream = self.storage.get_read_stream(dag_id, task_id, self.date) # Just make 100% sure we're at the beginning stream.seek(0) return stream def read_upstream_file(self, data_dependency_key, dag_id=None, encoding='utf-8'): """ Reads the file that was output by a seperate task in the same dag. :param str data_dependency_key: The key (business logic name) for the upstream dependency. This will get the value from the self.data_dependencies dictionary to determine the file to read from. :param str dag_id: Defaults to the current DAG id. :param str encoding: The file encoding to use. Defaults to 'utf-8'. :return: Result of reading the file :rtype: str """ if dag_id is None: dag_id = self.task_instance.dag_id task_id = self.data_dependencies[data_dependency_key] return self.storage.read(dag_id, task_id, self.date, encoding=encoding) def read_upstream_pandas_csv(self, data_dependency_key, dag_id=None, encoding='utf-8'): """ Reads a csv file from upstream into a pandas DataFrame. Specifically reads a csv into memory as a pandas dataframe in a standard manner. Reads the data in from a file output by a previous task. :param str data_dependency_key: The key (business logic name) for the upstream dependency. This will get the value from the self.data_dependencies dictionary to determine the file to read from. :param str dag_id: Defaults to the current DAG id. :param str encoding: The file encoding to use. Defaults to 'utf-8'. :return: The pandas dataframe. 
:rtype: :py:obj:`pd.DataFrame` """ # Read the upstream file as a stream, abstracting away storage concerns input_stream = self.get_upstream_stream(data_dependency_key, dag_id) return read_and_clean_csv_to_dataframe( filename_or_stream=input_stream, encoding=encoding ) def read_upstream_json(self, data_dependency_key, dag_id=None, encoding='utf-8'): """ Reads a json file from upstream into a python object. :param str data_dependency_key: The key for the upstream data dependency. This will get the value from the self.data_dependencies dict to determine the file to read. :param str dag_id: Defaults to the current DAG id. :param str encoding: The file encoding. Defaults to 'utf-8'. :return: A python object. """ return json.loads( self.read_upstream_file( data_dependency_key, dag_id, encoding=encoding ) ) def write_file(self, data, content_type='text/plain'): """ Writes the data out to the correct file. :param str data: The data to output. :param str content_type: The Content-Type to use. Currently only used by S3. """ self.storage.write( self.task_instance.dag_id, self.task_instance.task_id, self.date, data, content_type=content_type ) def write_from_stream(self, stream, content_type='text/plain'): self.storage.write_from_stream( self.task_instance.dag_id, self.task_instance.task_id, self.date, stream, content_type=content_type ) def write_timestamp_file(self): """ Writes an output file with the current timestamp. """ json = {'RUN': datetime.datetime.now().isoformat()} self.write_json(json) def write_pandas_csv(self, data): """ Specifically writes a csv from a pandas dataframe to the default output file in a standard manner. :param data: the dataframe to write. """ # When you pass filename=None, the result is returned as a string output = clean_and_write_dataframe_to_csv(data=data, filename=None) self.write_file(output, content_type='text/csv') def write_json(self, data): """ Write a python object to a JSON output file. :param object data: The python object to save. 
""" # TODO: Kinda weird that we embed the json.dumps() as we do since # it doesn't match the other conveience methods. Consider separating self.write_file(json.dumps(data), content_type='application/json') def run(self, *args, **kwargs): raise NotImplementedError("You must implement the run method for this task class.")
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Starting point for routing EC2 requests.
"""

from eventlet.green import httplib
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
import webob
import webob.dec
import webob.exc

from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.api.ec2 import faults
from nova.api import validator
from nova import context
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
from nova.openstack.common import timeutils
from nova import utils
from nova import wsgi

LOG = logging.getLogger(__name__)

# Configuration options for the EC2 API layer: auth lockout tuning,
# the keystone endpoint used to validate EC2 credentials, and
# request validation/expiry behavior.
ec2_opts = [
    cfg.IntOpt('lockout_attempts',
               default=5,
               help='Number of failed auths before lockout.'),
    cfg.IntOpt('lockout_minutes',
               default=15,
               help='Number of minutes to lockout if triggered.'),
    cfg.IntOpt('lockout_window',
               default=15,
               help='Number of minutes for lockout window.'),
    cfg.StrOpt('keystone_ec2_url',
               default='http://localhost:5000/v2.0/ec2tokens',
               help='URL to get token from ec2 request.'),
    cfg.BoolOpt('ec2_private_dns_show_ip',
                default=False,
                help='Return the IP address as private dns hostname in '
                     'describe instances'),
    cfg.BoolOpt('ec2_strict_validation',
                default=True,
                help='Validate security group names'
                     ' according to EC2 specification'),
    cfg.IntOpt('ec2_timestamp_expiry',
               default=300,
               help='Time in seconds before ec2 timestamp expires'),
]

CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('use_forwarded_for', 'nova.api.auth')


## Fault Wrapper around all EC2 requests ##
class FaultWrapper(wsgi.Middleware):
    """Calls the middleware stack, captures any exceptions into faults."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        try:
            return req.get_response(self.application)
        except Exception as ex:
            # Any unhandled exception from the stack becomes a 500 fault.
            LOG.exception(_("FaultWrapper: %s"), unicode(ex))
            return faults.Fault(webob.exc.HTTPInternalServerError())


class RequestLogging(wsgi.Middleware):
    """Access-Log akin logging for all EC2 API requests."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        start = timeutils.utcnow()
        rv = req.get_response(self.application)
        self.log_request_completion(rv, req, start)
        return rv

    def log_request_completion(self, response, request, start):
        """Log one access-log style line with timing and request metadata."""
        # Controller/action are only present once Requestify has run;
        # they may be None for requests rejected earlier in the pipeline.
        apireq = request.environ.get('ec2.request', None)
        if apireq:
            controller = apireq.controller
            action = apireq.action
        else:
            controller = None
            action = None
        ctxt = request.environ.get('nova.context', None)
        delta = timeutils.utcnow() - start
        seconds = delta.seconds
        microseconds = delta.microseconds
        LOG.info(
            "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
            seconds,
            microseconds,
            request.remote_addr,
            request.method,
            "%s%s" % (request.script_name, request.path_info),
            controller,
            action,
            response.status_int,
            request.user_agent,
            request.content_type,
            response.content_type,
            context=ctxt)


class Lockout(wsgi.Middleware):
    """Lockout for x minutes on y failed auths in a z minute period.

    x = lockout_timeout flag
    y = lockout_window flag
    z = lockout_attempts flag

    Uses memcached if lockout_memcached_servers flag is set, otherwise it
    uses a very simple in-process cache. Due to the simplicity of
    the implementation, the timeout window is started with the first
    failed request, so it will block if there are x failed logins within
    that period.

    There is a possible race condition where simultaneous requests could
    sneak in before the lockout hits, but this is extremely rare and would
    only result in a couple of extra failed attempts.
    """

    def __init__(self, application):
        """middleware can use fake for testing."""
        self.mc = memorycache.get_client()
        super(Lockout, self).__init__(application)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        access_key = str(req.params['AWSAccessKeyId'])
        failures_key = "authfailures-%s" % access_key
        failures = int(self.mc.get(failures_key) or 0)
        if failures >= CONF.lockout_attempts:
            detail = _("Too many failed authentications.")
            raise webob.exc.HTTPForbidden(explanation=detail)
        res = req.get_response(self.application)
        if res.status_int == 403:
            # A 403 from downstream counts as an auth failure.
            failures = self.mc.incr(failures_key)
            if failures is None:
                # NOTE(vish): To use incr, failures has to be a string.
                # incr returns None when the key does not exist yet, so
                # seed it with '1' and start the lockout window.
                self.mc.set(failures_key, '1',
                            time=CONF.lockout_window * 60)
            elif failures >= CONF.lockout_attempts:
                LOG.warn(_('Access key %(access_key)s has had %(failures)d '
                           'failed authentications and will be locked out '
                           'for %(lock_mins)d minutes.'),
                         {'access_key': access_key,
                          'failures': failures,
                          'lock_mins': CONF.lockout_minutes})
                # Extend the key lifetime to the full lockout duration.
                self.mc.set(failures_key, str(failures),
                            time=CONF.lockout_minutes * 60)
        return res


class EC2KeystoneAuth(wsgi.Middleware):
    """Authenticate an EC2 request with keystone and convert to context."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        request_id = context.generate_request_id()
        signature = req.params.get('Signature')
        if not signature:
            msg = _("Signature not provided")
            return faults.ec2_error_response(request_id, "AuthFailure", msg,
                                             status=400)
        access = req.params.get('AWSAccessKeyId')
        if not access:
            msg = _("Access key not provided")
            return faults.ec2_error_response(request_id, "AuthFailure", msg,
                                             status=400)

        # Make a copy of args for authentication and signature verification.
        auth_params = dict(req.params)
        # Not part of authentication args
        auth_params.pop('Signature')

        cred_dict = {
            'access': access,
            'signature': signature,
            'host': req.host,
            'verb': req.method,
            'path': req.path,
            'params': auth_params,
        }
        # Pick the credential payload shape based on whether the endpoint
        # is keystone's native ec2tokens API or the OS-KSEC2 extension.
        if "ec2" in CONF.keystone_ec2_url:
            creds = {'ec2Credentials': cred_dict}
        else:
            creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
        creds_json = jsonutils.dumps(creds)
        headers = {'Content-Type': 'application/json'}

        o = urlparse.urlparse(CONF.keystone_ec2_url)
        if o.scheme == "http":
            conn = httplib.HTTPConnection(o.netloc)
        else:
            conn = httplib.HTTPSConnection(o.netloc)
        conn.request('POST', o.path, body=creds_json, headers=headers)
        response = conn.getresponse()
        data = response.read()
        if response.status != 200:
            if response.status == 401:
                msg = response.reason
            else:
                msg = _("Failure communicating with keystone")
            return faults.ec2_error_response(request_id, "AuthFailure", msg,
                                             status=response.status)
        result = jsonutils.loads(data)
        conn.close()

        try:
            # Unpack the keystone token response into context fields.
            token_id = result['access']['token']['id']
            user_id = result['access']['user']['id']
            project_id = result['access']['token']['tenant']['id']
            user_name = result['access']['user'].get('name')
            project_name = result['access']['token']['tenant'].get('name')
            roles = [role['name'] for role
                     in result['access']['user']['roles']]
        except (AttributeError, KeyError) as e:
            LOG.exception(_("Keystone failure: %s") % e)
            msg = _("Failure communicating with keystone")
            return faults.ec2_error_response(request_id, "AuthFailure", msg,
                                             status=400)

        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            # NOTE(review): trusts the X-Forwarded-For header; only safe
            # behind a proxy that sets it — spoofable otherwise.
            remote_address = req.headers.get('X-Forwarded-For',
                                             remote_address)

        catalog = result['access']['serviceCatalog']
        ctxt = context.RequestContext(user_id,
                                      project_id,
                                      user_name=user_name,
                                      project_name=project_name,
                                      roles=roles,
                                      auth_token=token_id,
                                      remote_address=remote_address,
                                      service_catalog=catalog)

        req.environ['nova.context'] = ctxt

        return self.application


class NoAuth(wsgi.Middleware):
    """Add user:project as 'nova.context' to WSGI environ."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        if 'AWSAccessKeyId' not in req.params:
            raise webob.exc.HTTPBadRequest()
        # AWSAccessKeyId is interpreted as "user:project"; when no project
        # part is given, the user id doubles as the project id.
        user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
        project_id = project_id or user_id
        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For',
                                             remote_address)
        ctx = context.RequestContext(user_id,
                                     project_id,
                                     is_admin=True,
                                     remote_address=remote_address)

        req.environ['nova.context'] = ctx
        return self.application


class Requestify(wsgi.Middleware):
    """Turn a raw EC2 query request into an APIRequest in the WSGI environ."""

    def __init__(self, app, controller):
        super(Requestify, self).__init__(app)
        self.controller = importutils.import_object(controller)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Protocol-level parameters that must not be passed on as API args.
        non_args = ['Action', 'Signature', 'AWSAccessKeyId',
                    'SignatureMethod', 'SignatureVersion', 'Version',
                    'Timestamp']
        args = dict(req.params)
        try:
            expired = ec2utils.is_ec2_timestamp_expired(
                req.params,
                expires=CONF.ec2_timestamp_expiry)
            if expired:
                msg = _("Timestamp failed validation.")
                LOG.exception(msg)
                raise webob.exc.HTTPForbidden(explanation=msg)

            # Raise KeyError if omitted
            action = req.params['Action']
            # Fix bug lp:720157 for older (version 1) clients
            version = req.params['SignatureVersion']
            if int(version) == 1:
                # Version 1 clients may legitimately omit SignatureMethod.
                non_args.remove('SignatureMethod')
                if 'SignatureMethod' in args:
                    args.pop('SignatureMethod')
            for non_arg in non_args:
                # Remove, but raise KeyError if omitted
                args.pop(non_arg)
        except KeyError:
            raise webob.exc.HTTPBadRequest()
        except exception.InvalidRequest as err:
            raise webob.exc.HTTPBadRequest(explanation=unicode(err))

        LOG.debug(_('action: %s'), action)
        for key, value in args.items():
            LOG.debug(_('arg: %(key)s\t\tval: %(value)s'),
                      {'key': key, 'value': value})

        # Success!
        api_request = apirequest.APIRequest(self.controller, action,
                                            req.params['Version'], args)
        req.environ['ec2.request'] = api_request
        return self.application


class Authorizer(wsgi.Middleware):
    """Authorize an EC2 API request.

    Return a 401 if ec2.controller and ec2.action in WSGI environ may not be
    executed in nova.context.
    """

    def __init__(self, application):
        super(Authorizer, self).__init__(application)
        # Map of controller -> action -> list of roles allowed to run it.
        # 'all' means any authenticated user; 'none' (the default for
        # unlisted actions) means admins only.
        self.action_roles = {
            'CloudController': {
                'DescribeAvailabilityZones': ['all'],
                'DescribeRegions': ['all'],
                'DescribeSnapshots': ['all'],
                'DescribeKeyPairs': ['all'],
                'CreateKeyPair': ['all'],
                'DeleteKeyPair': ['all'],
                'DescribeSecurityGroups': ['all'],
                'ImportKeyPair': ['all'],
                'AuthorizeSecurityGroupIngress': ['netadmin'],
                'RevokeSecurityGroupIngress': ['netadmin'],
                'CreateSecurityGroup': ['netadmin'],
                'DeleteSecurityGroup': ['netadmin'],
                'GetConsoleOutput': ['projectmanager', 'sysadmin'],
                'DescribeVolumes': ['projectmanager', 'sysadmin'],
                'CreateVolume': ['projectmanager', 'sysadmin'],
                'AttachVolume': ['projectmanager', 'sysadmin'],
                'DetachVolume': ['projectmanager', 'sysadmin'],
                'DescribeInstances': ['all'],
                'DescribeAddresses': ['all'],
                'AllocateAddress': ['netadmin'],
                'ReleaseAddress': ['netadmin'],
                'AssociateAddress': ['netadmin'],
                'DisassociateAddress': ['netadmin'],
                'RunInstances': ['projectmanager', 'sysadmin'],
                'TerminateInstances': ['projectmanager', 'sysadmin'],
                'RebootInstances': ['projectmanager', 'sysadmin'],
                'UpdateInstance': ['projectmanager', 'sysadmin'],
                'StartInstances': ['projectmanager', 'sysadmin'],
                'StopInstances': ['projectmanager', 'sysadmin'],
                'DeleteVolume': ['projectmanager', 'sysadmin'],
                'DescribeImages': ['all'],
                'DeregisterImage': ['projectmanager', 'sysadmin'],
                'RegisterImage': ['projectmanager', 'sysadmin'],
                'DescribeImageAttribute': ['all'],
                'ModifyImageAttribute': ['projectmanager', 'sysadmin'],
                'UpdateImage': ['projectmanager', 'sysadmin'],
                'CreateImage': ['projectmanager', 'sysadmin'],
            },
            'AdminController': {
                # All actions have the same permission: ['none'] (the default)
                # superusers will be allowed to run them
                # all others will get HTTPUnauthorized.
            },
        }

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # NOTE: local name shadows the imported nova 'context' module
        # inside this method.
        context = req.environ['nova.context']
        controller = req.environ['ec2.request'].controller.__class__.__name__
        action = req.environ['ec2.request'].action
        allowed_roles = self.action_roles[controller].get(action, ['none'])
        if self._matches_any_role(context, allowed_roles):
            return self.application
        else:
            LOG.audit(_('Unauthorized request for controller=%(controller)s '
                        'and action=%(action)s'),
                      {'controller': controller, 'action': action},
                      context=context)
            raise webob.exc.HTTPUnauthorized()

    def _matches_any_role(self, context, roles):
        """Return True if any role in roles is allowed in context."""
        if context.is_admin:
            return True
        if 'all' in roles:
            return True
        if 'none' in roles:
            return False
        return any(role in context.roles for role in roles)


class Validator(wsgi.Middleware):
    """Validate the args of an EC2 request against per-field validators."""

    # NOTE: this is deliberately a plain function (no self) — it is defined
    # and patched onto the validator module at class-creation time below,
    # never called as a bound method.
    def validate_ec2_id(val):
        if not validator.validate_str()(val):
            return False
        try:
            ec2utils.ec2_id_to_id(val)
        except exception.InvalidEc2Id:
            return False
        return True

    validator.validate_ec2_id = validate_ec2_id

    # Per-argument validators, keyed by EC2 API argument name.
    validator.DEFAULT_VALIDATOR = {
        'instance_id': validator.validate_ec2_id,
        'volume_id': validator.validate_ec2_id,
        'image_id': validator.validate_ec2_id,
        'attribute': validator.validate_str(),
        'image_location': validator.validate_image_path,
        'public_ip': utils.is_valid_ipv4,
        'region_name': validator.validate_str(),
        'group_name': validator.validate_str(max_length=255),
        'group_description': validator.validate_str(max_length=255),
        'size': validator.validate_int(),
        'user_data': validator.validate_user_data
    }

    def __init__(self, application):
        super(Validator, self).__init__(application)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        if validator.validate(req.environ['ec2.request'].args,
                              validator.DEFAULT_VALIDATOR):
            return self.application
        else:
            raise webob.exc.HTTPBadRequest()


def exception_to_ec2code(ex):
    """Helper to extract EC2 error code from exception.

    For other than EC2 exceptions (those without ec2_code attribute),
    use exception name.
    """
    if hasattr(ex, 'ec2_code'):
        code = ex.ec2_code
    else:
        code = type(ex).__name__
    return code


def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
    """Return an EC2 error response based on passed exception and log
    the exception on an appropriate log level:

        * DEBUG: expected errors
        * ERROR: unexpected errors

    All expected errors are treated as client errors and 4xx HTTP
    status codes are always returned for them.

    Unexpected 5xx errors may contain sensitive information,
    suppress their messages for security.
    """
    if not code:
        code = exception_to_ec2code(ex)
    status = getattr(ex, 'code', None)
    if not status:
        status = 500

    if unexpected:
        log_fun = LOG.error
        if ex.args and status < 500:
            log_msg = _("Unexpected %(ex_name)s raised: %(ex_str)s")
        else:
            log_msg = _("Unexpected %(ex_name)s raised")
    else:
        log_fun = LOG.debug
        if ex.args:
            log_msg = _("%(ex_name)s raised: %(ex_str)s")
        else:
            log_msg = _("%(ex_name)s raised")

    # NOTE(jruzicka): For compatibility with EC2 API, treat expected
    # exceptions as client (4xx) errors. The exception error code is 500
    # by default and most exceptions inherit this from NovaException even
    # though they are actually client errors in most cases.
    if status >= 500:
        status = 400

    # NOTE: local name shadows the imported nova 'context' module here.
    context = req.environ['nova.context']
    request_id = context.request_id
    log_msg_args = {
        'ex_name': type(ex).__name__,
        'ex_str': unicode(ex)
    }
    log_fun(log_msg % log_msg_args, context=context)

    # Only expose the exception message to the client for expected errors
    # or unexpected client-side (4xx) errors; suppress unexpected 5xx text.
    if ex.args and not message and (not unexpected or status < 500):
        message = unicode(ex.args[0])
    if unexpected:
        # Log filtered environment for unexpected errors.
        # NOTE: mutating while iterating .keys() is safe on Python 2 only,
        # where keys() returns a list snapshot.
        env = req.environ.copy()
        for k in env.keys():
            if not isinstance(env[k], six.string_types):
                env.pop(k)
        log_fun(_('Environment: %s') % jsonutils.dumps(env))
    if not message:
        message = _('Unknown error occurred.')
    return faults.ec2_error_response(request_id, code, message,
                                     status=status)


class Executor(wsgi.Application):
    """Execute an EC2 API request.

    Executes 'ec2.action' upon 'ec2.controller', passing 'nova.context' and
    'ec2.action_args' (all variables in WSGI environ.)  Returns an XML
    response, or a 400 upon failure.
    """

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # NOTE: local name shadows the imported nova 'context' module here.
        context = req.environ['nova.context']
        api_request = req.environ['ec2.request']
        try:
            result = api_request.invoke(context)
        except exception.InstanceNotFound as ex:
            # Translate the internal id into the EC2-style id for the client.
            ec2_id = ec2utils.id_to_ec2_inst_id(ex.kwargs['instance_id'])
            message = ex.msg_fmt % {'instance_id': ec2_id}
            return ec2_error_ex(ex, req, message=message)
        except exception.VolumeNotFound as ex:
            ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id'])
            message = ex.msg_fmt % {'volume_id': ec2_id}
            return ec2_error_ex(ex, req, message=message)
        except exception.SnapshotNotFound as ex:
            ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id'])
            message = ex.msg_fmt % {'snapshot_id': ec2_id}
            return ec2_error_ex(ex, req, message=message)
        except (exception.CannotDisassociateAutoAssignedFloatingIP,
                exception.FloatingIpAssociated,
                exception.FloatingIpNotFound,
                exception.ImageNotActive,
                exception.InvalidInstanceIDMalformed,
                exception.InvalidKeypair,
                exception.InvalidParameterValue,
                exception.InvalidPortRange,
                exception.InvalidVolume,
                exception.KeyPairExists,
                exception.KeypairNotFound,
                exception.MissingParameter,
                exception.NoFloatingIpInterface,
                exception.NoMoreFixedIps,
                exception.NotAuthorized,
                exception.QuotaError,
                exception.SecurityGroupExists,
                exception.SecurityGroupLimitExceeded,
                exception.SecurityGroupRuleExists,
                exception.VolumeUnattached,
                # Following aren't translated to valid EC2 errors.
                exception.ImageNotFound,
                exception.ImageNotFoundEC2,
                exception.InvalidAttribute,
                exception.InvalidRequest,
                exception.NotFound) as ex:
            # Expected errors: logged at DEBUG, returned as 4xx.
            return ec2_error_ex(ex, req)
        except Exception as ex:
            # Anything else is unexpected: logged at ERROR, message suppressed.
            return ec2_error_ex(ex, req, unexpected=True)
        else:
            resp = webob.Response()
            resp.status = 200
            resp.headers['Content-Type'] = 'text/xml'
            resp.body = str(result)
            return resp
import warnings

import _zeros
from numpy import finfo, sign, sqrt

# Shared defaults for the bracketing solvers below.
_iter = 100
_xtol = 1e-12
# not actually used at the moment
_rtol = finfo(float).eps * 2

__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth']

# Termination flags reported by the C solvers in ``_zeros``.
CONVERGED = 'converged'
SIGNERR = 'sign error'
CONVERR = 'convergence error'
flag_map = {0: CONVERGED, -1: SIGNERR, -2: CONVERR}


class RootResults(object):
    """Represents the result of a root-finding run.

    Attributes
    ----------
    root : float
        Estimated root location.
    iterations : int
        Number of iterations performed.
    function_calls : int
        Number of times the function was evaluated.
    converged : bool
        True if the routine converged.
    flag : str
        Human-readable description of the termination cause.
    """

    def __init__(self, root, iterations, function_calls, flag):
        self.root = root
        self.iterations = iterations
        self.function_calls = function_calls
        self.converged = flag == 0
        try:
            self.flag = flag_map[flag]
        except KeyError:
            self.flag = 'unknown error %d' % (flag,)


def results_c(full_output, r):
    """Convert the raw tuple returned by a ``_zeros`` solver into the public
    return value: ``(root, RootResults)`` if `full_output` is set, otherwise
    the raw root unchanged."""
    if full_output:
        x, funcalls, iterations, flag = r
        results = RootResults(root=x,
                              iterations=iterations,
                              function_calls=funcalls,
                              flag=flag)
        return x, results
    else:
        return r


# Newton-Raphson method
def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
           fprime2=None):
    """
    Find a zero using the Newton-Raphson or secant method.

    Find a zero of the function `func` given a nearby starting point `x0`.
    The Newton-Raphson method is used if the derivative `fprime` of `func`
    is provided, otherwise the secant method is used.  If the second order
    derivative `fprime2` of `func` is provided, parabolic Halley's method
    is used.

    Parameters
    ----------
    func : function
        The function whose zero is wanted. It must be a function of a
        single variable of the form f(x,a,b,c...), where a,b,c... are extra
        arguments that can be passed in the `args` parameter.
    x0 : float
        An initial estimate of the zero that should be somewhere near the
        actual zero.
    fprime : function, optional
        The derivative of the function when available and convenient. If it
        is None (default), then the secant method is used.
    args : tuple, optional
        Extra arguments to be used in the function call.
    tol : float, optional
        The allowable error of the zero value.
    maxiter : int, optional
        Maximum number of iterations.
    fprime2 : function, optional
        The second order derivative of the function when available and
        convenient. If it is None (default), then the normal Newton-Raphson
        or the secant method is used. If it is given, parabolic Halley's
        method is used.

    Returns
    -------
    zero : float
        Estimated location where function is zero.

    See Also
    --------
    brentq, brenth, ridder, bisect
    fsolve : find zeroes in n dimensions.

    Notes
    -----
    The convergence rate of the Newton-Raphson method is quadratic,
    the Halley method is cubic, and the secant method is sub-quadratic.
    This means that if the function is well behaved the actual error in the
    estimated zero is approximately the square (cube for Halley) of the
    requested tolerance up to roundoff error. However, the stopping criterion
    used here is the step size and there is no guarantee that a zero
    has been found. Consequently the result should be verified. Safer
    algorithms are brentq, brenth, ridder, and bisect, but they all require
    that the root first be bracketed in an interval where the function
    changes sign. The brentq algorithm is recommended for general use in one
    dimensional problems when such an interval has been found.

    """
    if fprime is not None:
        # Newton-Raphson method
        # Multiply by 1.0 to convert to floating point.  We don't use
        # float(x0) so it still works if x0 is complex.
        p0 = 1.0 * x0
        # Pre-seed ``p`` so the failure message below cannot raise NameError
        # when maxiter < 1.
        p = p0
        fder2 = 0
        for itr in range(maxiter):
            myargs = (p0,) + args
            fder = fprime(*myargs)
            if fder == 0:
                msg = "derivative was zero."
                warnings.warn(msg, RuntimeWarning)
                return p0
            fval = func(*myargs)
            if fprime2 is not None:
                fder2 = fprime2(*myargs)
            if fder2 == 0:
                # Newton step
                p = p0 - fval / fder
            else:
                # Parabolic Halley's method
                discr = fder ** 2 - 2 * fval * fder2
                if discr < 0:
                    # Discriminant negative: fall back to a damped step.
                    p = p0 - fder / fder2
                else:
                    p = p0 - 2*fval / (fder + sign(fder) * sqrt(discr))
            if abs(p - p0) < tol:
                return p
            p0 = p
    else:
        # Secant method: bootstrap a second point p1 slightly away from x0.
        p0 = x0
        if x0 >= 0:
            p1 = x0*(1 + 1e-4) + 1e-4
        else:
            p1 = x0*(1 + 1e-4) - 1e-4
        # Pre-seed ``p`` for the maxiter < 1 failure path (see above).
        p = p1
        q0 = func(*((p0,) + args))
        q1 = func(*((p1,) + args))
        for itr in range(maxiter):
            if q1 == q0:
                # Flat secant: cannot divide; return the midpoint, warning
                # if the bracket has not yet collapsed to a point.
                if p1 != p0:
                    msg = "Tolerance of %s reached" % (p1 - p0)
                    warnings.warn(msg, RuntimeWarning)
                return (p1 + p0)/2.0
            else:
                p = p1 - q1*(p1 - p0)/(q1 - q0)
            if abs(p - p1) < tol:
                return p
            p0 = p1
            q0 = q1
            p1 = p
            q1 = func(*((p1,) + args))
    msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
    raise RuntimeError(msg)


def bisect(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find root of f in [a,b].

    Basic bisection routine to find a zero of the function `f` between the
    arguments `a` and `b`. f(a) and f(b) can not have the same signs.
    Slow but sure.

    Parameters
    ----------
    f : function
        Python function returning a number.  f must be continuous, and
        f(a) and f(b) must have opposite signs.
    a : number
        One end of the bracketing interval [a,b].
    b : number
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The routine converges when a root is known to lie within xtol of the
        value return. Should be >= 0.  The routine modifies this to take into
        account the relative precision of doubles.
    maxiter : number, optional
        If convergence is not achieved in maxiter iterations, an error is
        raised.  Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``apply(f, (x)+args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned.  If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r`
        is a RootResults object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Object containing information about the convergence.  In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    brentq, brenth, bisect, newton
    fixed_point : scalar fixed-point finder
    fsolve : n-dimensional root-finding

    """
    # Accept a bare scalar argument for convenience.
    if not isinstance(args, tuple):
        args = (args,)
    r = _zeros._bisect(f, a, b, xtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)


def ridder(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of a function in an interval.

    Parameters
    ----------
    f : function
        Python function returning a number.  f must be continuous, and f(a)
        and f(b) must have opposite signs.
    a : number
        One end of the bracketing interval [a,b].
    b : number
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The routine converges when a root is known to lie within xtol of the
        value return. Should be >= 0.  The routine modifies this to take into
        account the relative precision of doubles.
    maxiter : number, optional
        If convergence is not achieved in maxiter iterations, an error is
        raised.  Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``apply(f, (x)+args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned.  If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r`
        is a RootResults object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Object containing information about the convergence.  In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    brentq, brenth, bisect, newton : one-dimensional root-finding
    fixed_point : scalar fixed-point finder

    Notes
    -----
    Uses [Ridders1979]_ method to find a zero of the function `f` between the
    arguments `a` and `b`. Ridders' method is faster than bisection, but not
    generally as fast as the Brent routines. [Ridders1979]_ provides the
    classic description and source of the algorithm. A description can also be
    found in any recent edition of Numerical Recipes.

    The routine used here diverges slightly from standard presentations in
    order to be a bit more careful of tolerance.

    References
    ----------
    .. [Ridders1979]
       Ridders, C. F. J. "A New Algorithm for Computing a
       Single Root of a Real Continuous Function."
       IEEE Trans. Circuits Systems 26, 979-980, 1979.

    """
    if not isinstance(args, tuple):
        args = (args,)
    r = _zeros._ridder(f, a, b, xtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)


def brentq(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of a function in given interval.

    Return float, a zero of `f` between `a` and `b`.  `f` must be a
    continuous function, and [a,b] must be a sign changing interval.

    Description:
    Uses the classic Brent (1973) method to find a zero of the function `f`
    on the sign changing interval [a , b].  Generally considered the best of
    the rootfinding routines here.  It is a safe version of the secant method
    that uses inverse quadratic extrapolation.  Brent's method combines root
    bracketing, interval bisection, and inverse quadratic interpolation.  It
    is sometimes known as the van Wijngaarden-Deker-Brent method.  Brent
    (1973) claims convergence is guaranteed for functions computable within
    [a,b].

    [Brent1973]_ provides the classic description of the algorithm.  Another
    description can be found in a recent edition of Numerical Recipes,
    including [PressEtal1992]_.  Another description is at
    http://mathworld.wolfram.com/BrentsMethod.html.  It should be easy to
    understand the algorithm just by reading our code.  Our code diverges a
    bit from standard presentations: we choose a different formula for the
    extrapolation step.

    Parameters
    ----------
    f : function
        Python function returning a number.  f must be continuous, and f(a)
        and f(b) must have opposite signs.
    a : number
        One end of the bracketing interval [a,b].
    b : number
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The routine converges when a root is known to lie within xtol of the
        value return. Should be >= 0.  The routine modifies this to take into
        account the relative precision of doubles.
    maxiter : number, optional
        If convergence is not achieved in maxiter iterations, an error is
        raised.  Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``apply(f, (x)+args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned.  If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r`
        is a RootResults object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Object containing information about the convergence.  In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    multivariate local optimizers
      `fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg`
    nonlinear least squares minimizer
      `leastsq`
    constrained multivariate optimizers
      `fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla`
    global optimizers
      `anneal`, `brute`
    local scalar minimizers
      `fminbound`, `brent`, `golden`, `bracket`
    n-dimensional root-finding
      `fsolve`
    one-dimensional root-finding
      `brentq`, `brenth`, `ridder`, `bisect`, `newton`
    scalar fixed-point finder
      `fixed_point`

    Notes
    -----
    `f` must be continuous.  f(a) and f(b) must have opposite signs.

    References
    ----------
    .. [Brent1973]
       Brent, R. P.,
       *Algorithms for Minimization Without Derivatives*.
       Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.

    .. [PressEtal1992]
       Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.
       *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd
       ed.  Cambridge, England: Cambridge University Press, pp. 352-355,
       1992.  Section 9.3: "Van Wijngaarden-Dekker-Brent Method."

    """
    if not isinstance(args, tuple):
        args = (args,)
    r = _zeros._brentq(f, a, b, xtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)


def brenth(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """Find root of f in [a,b].

    A variation on the classic Brent routine to find a zero of the function f
    between the arguments a and b that uses hyperbolic extrapolation instead
    of inverse quadratic extrapolation.  There was a paper back in the 1980's
    ...  f(a) and f(b) can not have the same signs.  Generally on a par with
    the brent routine, but not as heavily tested.  It is a safe version of
    the secant method that uses hyperbolic extrapolation.  The version here
    is by Chuck Harris.

    Parameters
    ----------
    f : function
        Python function returning a number.  f must be continuous, and f(a)
        and f(b) must have opposite signs.
    a : number
        One end of the bracketing interval [a,b].
    b : number
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The routine converges when a root is known to lie within xtol of the
        value return. Should be >= 0.  The routine modifies this to take into
        account the relative precision of doubles.
    maxiter : number, optional
        If convergence is not achieved in maxiter iterations, an error is
        raised.  Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``apply(f, (x)+args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned.  If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r`
        is a RootResults object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Object containing information about the convergence.  In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg : multivariate local
        optimizers
    leastsq : nonlinear least squares minimizer
    fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers
    anneal, brute : global optimizers
    fminbound, brent, golden, bracket : local scalar minimizers
    fsolve : n-dimensional root-finding
    brentq, brenth, ridder, bisect, newton : one-dimensional root-finding
    fixed_point : scalar fixed-point finder

    """
    if not isinstance(args, tuple):
        args = (args,)
    r = _zeros._brenth(f, a, b, xtol, maxiter, args, full_output, disp)
    return results_c(full_output, r)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from datetime import datetime

from helper.utils import TestUtils as tu

from mushroom_rl.core import Agent
from mushroom_rl.algorithms.value import *
from mushroom_rl.approximators.parametric import LinearApproximator,\
    TorchApproximator
from mushroom_rl.core import Core
from mushroom_rl.environments.grid_world import GridWorld
from mushroom_rl.environments.gym_env import Gym
from mushroom_rl.features import Features
from mushroom_rl.features.tiles import Tiles
from mushroom_rl.policy.td_policy import EpsGreedy
from mushroom_rl.utils.parameters import Parameter


class Network(nn.Module):
    """Minimal one-layer Q-network used by the TorchApproximator tests."""

    def __init__(self, input_shape, output_shape, **kwargs):
        super().__init__()

        n_input = input_shape[-1]
        n_output = output_shape[0]

        self._h1 = nn.Linear(n_input, n_output)

        nn.init.xavier_uniform_(self._h1.weight,
                                gain=nn.init.calculate_gain('relu'))

    def forward(self, state, action=None):
        # All-action Q-values; gather the acted column when an action is
        # supplied.
        q = F.relu(self._h1(torch.squeeze(state, 1).float()))

        if action is None:
            return q
        else:
            action = action.long()
            q_acted = torch.squeeze(q.gather(1, action))

            return q_acted


def initialize():
    """Seed numpy/torch and build the shared fixtures.

    Returns
    -------
    (EpsGreedy, GridWorld, Gym)
        A fully-exploring policy, the 2x2 discrete grid world, and the
        continuous MountainCar environment.
    """
    np.random.seed(1)
    torch.manual_seed(1)
    return EpsGreedy(Parameter(1)), GridWorld(2, 2, start=(0, 0),
                                              goal=(1, 1)),\
        Gym(name='MountainCar-v0', horizon=np.inf, gamma=1.)


def _train(agent, mdp):
    """Run the standard 100-step on-line training used by every test."""
    core = Core(agent, mdp)
    core.learn(n_steps=100, n_steps_per_fit=1, quiet=True)


def _check_save_load(agent, tmpdir):
    """Save ``agent``, reload it and verify every attribute round-trips."""
    agent_path = tmpdir / 'agent_{}'.format(
        datetime.now().strftime("%H%M%S%f"))
    agent.save(agent_path)
    agent_load = Agent.load(agent_path)

    for att in vars(agent):
        tu.assert_eq(getattr(agent, att), getattr(agent_load, att))


def _linear_setup(mdp_continuous):
    """Tile-coding features + linear approximator params for ``mdp``."""
    n_tilings = 1
    tilings = Tiles.generate(n_tilings, [2, 2],
                             mdp_continuous.info.observation_space.low,
                             mdp_continuous.info.observation_space.high)
    features = Features(tilings=tilings)

    approximator_params = dict(
        input_shape=(features.size,),
        output_shape=(mdp_continuous.info.action_space.n,),
        n_actions=mdp_continuous.info.action_space.n
    )
    return features, approximator_params


def _nn_setup(mdp_continuous):
    """Identity features + torch-network approximator params for ``mdp``."""
    features = Features(
        n_outputs=mdp_continuous.info.observation_space.shape[0]
    )

    approximator_params = dict(
        input_shape=(features.size,),
        output_shape=(mdp_continuous.info.action_space.n,),
        network=Network,
        n_actions=mdp_continuous.info.action_space.n
    )
    return features, approximator_params


def test_q_learning():
    pi, mdp, _ = initialize()
    agent = QLearning(mdp.info, pi, Parameter(.5))
    _train(agent, mdp)

    test_q = np.array([[7.82042542, 8.40151978, 7.64961548, 8.82421875],
                       [8.77587891, 9.921875, 7.29316406, 8.68359375],
                       [7.7203125, 7.69921875, 4.5, 9.84375],
                       [0., 0., 0., 0.]])

    assert np.allclose(agent.Q.table, test_q)


def test_q_learning_save(tmpdir):
    pi, mdp, _ = initialize()
    agent_save = QLearning(mdp.info, pi, Parameter(.5))
    _train(agent_save, mdp)
    _check_save_load(agent_save, tmpdir)


def test_double_q_learning():
    pi, mdp, _ = initialize()
    agent = DoubleQLearning(mdp.info, pi, Parameter(.5))
    _train(agent, mdp)

    test_q_0 = np.array([[2.6578125, 6.94757812, 3.73359375, 7.171875],
                         [2.25, 7.5, 3.0375, 3.375],
                         [3.0375, 5.4140625, 2.08265625, 8.75],
                         [0., 0., 0., 0.]])
    test_q_1 = np.array([[2.72109375, 4.5, 4.36640625, 6.609375],
                         [4.5, 9.375, 4.49296875, 4.5],
                         [1.0125, 5.0625, 5.625, 8.75],
                         [0., 0., 0., 0.]])

    assert np.allclose(agent.Q[0].table, test_q_0)
    assert np.allclose(agent.Q[1].table, test_q_1)


def test_double_q_learning_save(tmpdir):
    pi, mdp, _ = initialize()
    agent_save = DoubleQLearning(mdp.info, pi, Parameter(.5))
    _train(agent_save, mdp)
    _check_save_load(agent_save, tmpdir)


def test_weighted_q_learning():
    pi, mdp, _ = initialize()
    agent = WeightedQLearning(mdp.info, pi, Parameter(.5))
    _train(agent, mdp)

    test_q = np.array([[8.00815525, 4.09343205, 7.94406811, 8.96270031],
                       [8.31597686, 9.99023438, 6.42921521, 7.70471909],
                       [7.26069091, 0.87610663, 3.70440836, 9.6875],
                       [0., 0., 0., 0.]])

    assert np.allclose(agent.Q.table, test_q)


def test_weighted_q_learning_save(tmpdir):
    pi, mdp, _ = initialize()
    agent_save = WeightedQLearning(mdp.info, pi, Parameter(.5))
    _train(agent_save, mdp)
    _check_save_load(agent_save, tmpdir)


def test_maxmin_q_learning():
    pi, mdp, _ = initialize()
    agent = MaxminQLearning(mdp.info, pi, Parameter(.5), n_tables=4)
    _train(agent, mdp)

    test_q = np.array([[0., 0., 0., 0.],
                       [0., 7.5, 0., 0.],
                       [0., 0., 0., 5.],
                       [0., 0., 0., 0.]])

    assert np.allclose(agent.Q[0].table, test_q)


def test_maxmin_q_learning_save(tmpdir):
    pi, mdp, _ = initialize()
    # NOTE: intentionally a different table count than the behavior test.
    agent_save = MaxminQLearning(mdp.info, pi, Parameter(.5), n_tables=5)
    _train(agent_save, mdp)
    _check_save_load(agent_save, tmpdir)


def test_speedy_q_learning():
    pi, mdp, _ = initialize()
    agent = SpeedyQLearning(mdp.info, pi, Parameter(.5))
    _train(agent, mdp)

    test_q = np.array([[7.82042542, 8.40151978, 7.64961548, 8.82421875],
                       [8.77587891, 9.921875, 7.29316406, 8.68359375],
                       [7.7203125, 7.69921875, 4.5, 9.84375],
                       [0., 0., 0., 0.]])

    assert np.allclose(agent.Q.table, test_q)


def test_speedy_q_learning_save(tmpdir):
    pi, mdp, _ = initialize()
    agent_save = SpeedyQLearning(mdp.info, pi, Parameter(.5))
    _train(agent_save, mdp)
    _check_save_load(agent_save, tmpdir)


def test_sarsa():
    pi, mdp, _ = initialize()
    agent = SARSA(mdp.info, pi, Parameter(.1))
    _train(agent, mdp)

    test_q = np.array([[4.31368701e-2, 3.68037689e-1, 4.14040445e-2,
                        1.64007642e-1],
                       [6.45491436e-1, 4.68559000, 8.07603735e-2,
                        1.67297938e-1],
                       [4.21445838e-2, 3.71538042e-3, 0., 3.439],
                       [0., 0., 0., 0.]])

    assert np.allclose(agent.Q.table, test_q)


def test_sarsa_save(tmpdir):
    pi, mdp, _ = initialize()
    agent_save = SARSA(mdp.info, pi, Parameter(.1))
    _train(agent_save, mdp)
    _check_save_load(agent_save, tmpdir)


def test_q_lambda():
    pi, mdp, _ = initialize()
    agent = QLambda(mdp.info, pi, Parameter(.1), .9)
    _train(agent, mdp)

    test_q = np.array([[5.07310744, 5.6013244, 3.42130445, 5.90556511],
                       [3.4410511, 5.217031, 2.51555213, 4.0616156],
                       [3.76728025, 2.17726915, 1.0955066, 4.68559],
                       [0., 0., 0., 0.]])

    assert np.allclose(agent.Q.table, test_q)


def test_q_lambda_save(tmpdir):
    pi, mdp, _ = initialize()
    agent_save = QLambda(mdp.info, pi, Parameter(.1), .9)
    _train(agent_save, mdp)
    _check_save_load(agent_save, tmpdir)


def test_sarsa_lambda_discrete():
    pi, mdp, _ = initialize()
    agent = SARSALambda(mdp.info, pi, Parameter(.1), .9)
    _train(agent, mdp)

    test_q = np.array([[1.88093529, 2.42467354, 1.07390687, 2.39288988],
                       [2.46058746, 4.68559, 1.5661933, 2.56586018],
                       [1.24808966, 0.91948465, 0.47734152, 3.439],
                       [0., 0., 0., 0.]])

    assert np.allclose(agent.Q.table, test_q)


def test_sarsa_lambda_discrete_save(tmpdir):
    pi, mdp, _ = initialize()
    agent_save = SARSALambda(mdp.info, pi, Parameter(.1), .9)
    _train(agent_save, mdp)
    _check_save_load(agent_save, tmpdir)


def test_sarsa_lambda_continuous_linear():
    pi, _, mdp_continuous = initialize()
    mdp_continuous.seed(1)
    features, approximator_params = _linear_setup(mdp_continuous)
    agent = SARSALambdaContinuous(mdp_continuous.info, pi,
                                  LinearApproximator, Parameter(.1), .9,
                                  features=features,
                                  approximator_params=approximator_params)
    _train(agent, mdp_continuous)

    test_w = np.array([-16.38428419, 0., -14.31250136, 0., -15.68571525,
                       0., -10.15663821, 0., -15.0545445, 0., -8.3683605,
                       0.])

    assert np.allclose(agent.Q.get_weights(), test_w)


def test_sarsa_lambda_continuous_linear_save(tmpdir):
    pi, _, mdp_continuous = initialize()
    mdp_continuous.seed(1)
    features, approximator_params = _linear_setup(mdp_continuous)
    agent_save = SARSALambdaContinuous(
        mdp_continuous.info, pi, LinearApproximator, Parameter(.1), .9,
        features=features, approximator_params=approximator_params)
    _train(agent_save, mdp_continuous)
    _check_save_load(agent_save, tmpdir)


def test_sarsa_lambda_continuous_nn():
    pi, _, mdp_continuous = initialize()
    mdp_continuous.seed(1)
    features, approximator_params = _nn_setup(mdp_continuous)
    agent = SARSALambdaContinuous(mdp_continuous.info, pi,
                                  TorchApproximator, Parameter(.1), .9,
                                  features=features,
                                  approximator_params=approximator_params)
    _train(agent, mdp_continuous)

    test_w = np.array([-0.18968964, 0.4296857, 0.52967095, 0.5674884,
                       -0.12784956, -0.10572472, -0.14546978, -0.67001086,
                       -0.93925357])

    assert np.allclose(agent.Q.get_weights(), test_w)


def test_sarsa_lambda_continuous_nn_save(tmpdir):
    pi, _, mdp_continuous = initialize()
    mdp_continuous.seed(1)
    features, approximator_params = _nn_setup(mdp_continuous)
    agent_save = SARSALambdaContinuous(
        mdp_continuous.info, pi, TorchApproximator, Parameter(.1), .9,
        features=features, approximator_params=approximator_params)
    _train(agent_save, mdp_continuous)
    _check_save_load(agent_save, tmpdir)


def test_expected_sarsa():
    pi, mdp, _ = initialize()
    agent = ExpectedSARSA(mdp.info, pi, Parameter(.1))
    _train(agent, mdp)

    test_q = np.array([[0.10221208, 0.48411449, 0.07688765, 0.64002317],
                       [0.58525881, 5.217031, 0.06047094, 0.48214145],
                       [0.08478224, 0.28873536, 0.06543094, 4.68559],
                       [0., 0., 0., 0.]])

    assert np.allclose(agent.Q.table, test_q)


def test_expected_sarsa_save(tmpdir):
    pi, mdp, _ = initialize()
    agent_save = ExpectedSARSA(mdp.info, pi, Parameter(.1))
    _train(agent_save, mdp)
    _check_save_load(agent_save, tmpdir)


def test_true_online_sarsa_lambda():
    pi, _, mdp_continuous = initialize()
    mdp_continuous.seed(1)
    features, approximator_params = _linear_setup(mdp_continuous)
    agent = TrueOnlineSARSALambda(mdp_continuous.info, pi,
                                  Parameter(.1), .9,
                                  features=features,
                                  approximator_params=approximator_params)
    _train(agent, mdp_continuous)

    test_w = np.array([-17.27410736, 0., -15.04386343, 0., -16.6551805,
                       0., -11.31383707, 0., -16.11782002, 0., -9.6927357,
                       0.])

    assert np.allclose(agent.Q.get_weights(), test_w)


def test_true_online_sarsa_lambda_save(tmpdir):
    pi, _, mdp_continuous = initialize()
    mdp_continuous.seed(1)
    features, approximator_params = _linear_setup(mdp_continuous)
    agent_save = TrueOnlineSARSALambda(
        mdp_continuous.info, pi, Parameter(.1), .9,
        features=features, approximator_params=approximator_params)
    _train(agent_save, mdp_continuous)
    _check_save_load(agent_save, tmpdir)


def test_r_learning():
    pi, mdp, _ = initialize()
    agent = RLearning(mdp.info, pi, Parameter(.1), Parameter(.5))
    _train(agent, mdp)

    test_q = np.array([[-6.19137991, -3.9368055, -5.11544257, -3.43673781],
                       [-2.52319391, 1.92201829, -2.77602918, -2.45972955],
                       [-5.38824415, -2.43019918, -1.09965936, 2.04202511],
                       [0., 0., 0., 0.]])

    assert np.allclose(agent.Q.table, test_q)


def test_r_learning_save(tmpdir):
    pi, mdp, _ = initialize()
    agent_save = RLearning(mdp.info, pi, Parameter(.1), Parameter(.5))
    _train(agent_save, mdp)
    _check_save_load(agent_save, tmpdir)


def test_rq_learning():
    # Four variants: {on-policy, off-policy} x {beta, delta}.
    pi, mdp, _ = initialize()
    agent = RQLearning(mdp.info, pi, Parameter(.1), beta=Parameter(.5))
    _train(agent, mdp)

    test_q = np.array([[0.32411217, 2.9698436, 0.46474438, 1.10269504],
                       [2.99505139, 5.217031, 0.40933461, 0.37687883],
                       [0.41942675, 0.32363486, 0., 4.68559],
                       [0., 0., 0., 0.]])

    assert np.allclose(agent.Q.table, test_q)

    agent = RQLearning(mdp.info, pi, Parameter(.1), delta=Parameter(.5))
    _train(agent, mdp)

    test_q = np.array([[1.04081115e-2, 5.14662188e-1, 1.73951634e-2,
                        1.24081875e-01],
                       [0., 2.71, 1.73137500e-4, 4.10062500e-6],
                       [0., 4.50000000e-2, 0., 4.68559],
                       [0., 0., 0., 0.]])

    assert np.allclose(agent.Q.table, test_q)

    agent = RQLearning(mdp.info, pi, Parameter(.1), off_policy=True,
                       beta=Parameter(.5))
    _train(agent, mdp)

    test_q = np.array([[3.55204022, 4.54235939, 3.42601165, 2.95170908],
                       [2.73877031, 3.439, 2.42031528, 2.86634531],
                       [3.43274708, 3.8592342, 3.72637395, 5.217031],
                       [0., 0., 0., 0.]])

    assert np.allclose(agent.Q.table, test_q)

    agent = RQLearning(mdp.info, pi, Parameter(.1), off_policy=True,
                       delta=Parameter(.5))
    _train(agent, mdp)

    test_q = np.array([[0.18947806, 1.57782254, 0.21911489, 1.05197011],
                       [0.82309759, 5.217031, 0.04167492, 0.61472604],
                       [0.23620541, 0.59828262, 1.25299991, 5.217031],
                       [0., 0., 0., 0.]])

    assert np.allclose(agent.Q.table, test_q)


def test_rq_learning_save(tmpdir):
    pi, mdp, _ = initialize()
    agent_save = RQLearning(mdp.info, pi, Parameter(.1), beta=Parameter(.5))
    _train(agent_save, mdp)
    _check_save_load(agent_save, tmpdir)
# Copyright (C) 2013-2015 MetaMorph Software, Inc # Permission is hereby granted, free of charge, to any person obtaining a # copy of this data, including any software or models in source or binary # form, as well as any drawings, specifications, and documentation # (collectively "the Data"), to deal in the Data without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Data, and to # permit persons to whom the Data is furnished to do so, subject to the # following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Data. # THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA. # ======================= # This version of the META tools is a fork of an original version produced # by Vanderbilt University's Institute for Software Integrated Systems (ISIS). # Their license statement: # Copyright (C) 2011-2014 Vanderbilt University # Developed with the sponsorship of the Defense Advanced Research Projects # Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights # as defined in DFARS 252.227-7013. 
# Permission is hereby granted, free of charge, to any person obtaining a # copy of this data, including any software or models in source or binary # form, as well as any drawings, specifications, and documentation # (collectively "the Data"), to deal in the Data without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Data, and to # permit persons to whom the Data is furnished to do so, subject to the # following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Data. # THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA. # http://redsolo.blogspot.com/2007/11/hudson-embraces-python.html # http://appfusedjango.googlecode.com/svn/trunk/tests/xmlrunner.py """ XML Test Runner for PyUnit """ # Written by Sebastian Rittau <srittau@jroger.in-berlin.de> and placed in # the Public Domain. With contributions by Paolo Borelli. __revision__ = "$Id: /private/python/stdlib/xmlrunner.py 16654 2007-11-12T12:46:35.368945Z srittau $" import os.path import re import sys import time import traceback import unittest from StringIO import StringIO from xml.sax.saxutils import escape from StringIO import StringIO def strip_ml_tags(in_text): """Description: Removes all HTML/XML-like tags from the input text. 
Inputs: s --> string of text Outputs: text string without the tags # doctest unit testing framework >>> test_text = "Keep this Text <remove><me /> KEEP </remove> 123" >>> strip_ml_tags(test_text) 'Keep this Text KEEP 123' """ # convert in_text to a mutable object (e.g. list) s_list = list(in_text) i,j = 0,0 while i < len(s_list): # iterate until a left-angle bracket is found if s_list[i] == '<': while s_list[i] != '>': # pop everything from the the left-angle bracket until the right-angle bracket s_list.pop(i) # pops the right-angle bracket, too s_list.pop(i) else: i=i+1 # convert the list back into text join_char='' return join_char.join(s_list) class _TestInfo(object): """Information about a particular test. Used by _XMLTestResult. """ def __init__(self, test, time): (self._class, self._method) = test.id().rsplit(".", 1) self._time = time self._error = None self._failure = None @staticmethod def create_success(test, time): """Create a _TestInfo instance for a successful test.""" return _TestInfo(test, time) @staticmethod def create_failure(test, time, failure): """Create a _TestInfo instance for a failed test.""" info = _TestInfo(test, time) info._failure = failure return info @staticmethod def create_error(test, time, error): """Create a _TestInfo instance for an erroneous test.""" info = _TestInfo(test, time) info._error = error return info def print_report(self, stream): """Print information about this test case in XML format to the supplied stream. 
""" stream.write(' <testcase classname="%(class)s" name="%(method)s" time="%(time).4f">' % \ { "class": self._class, "method": self._method, "time": self._time, }) if self._failure != None: self._print_error(stream, 'failure', self._failure) if self._error != None: self._print_error(stream, 'error', self._error) stream.write('</testcase>\n') def _print_error(self, stream, tagname, error): """Print information from a failure or error to the supplied stream.""" text = escape(str(error[1])) stream.write('\n') stream.write(' <%s type="%s">%s\n' \ % (tagname, str(error[0]).strip("<>"), strip_ml_tags(text))) tb_stream = StringIO() traceback.print_tb(error[2], None, tb_stream) stream.write(escape(tb_stream.getvalue())) stream.write(' </%s>\n' % tagname) stream.write(' ') class _XMLTestResult(unittest.TestResult): """A test result class that stores result as XML. Used by XMLTestRunner. """ def __init__(self, classname): unittest.TestResult.__init__(self) self._test_name = classname self._start_time = None self._tests = [] self._error = None self._failure = None def startTest(self, test): unittest.TestResult.startTest(self, test) self._error = None self._failure = None self._start_time = time.time() def stopTest(self, test): time_taken = time.time() - self._start_time unittest.TestResult.stopTest(self, test) if self._error: info = _TestInfo.create_error(test, time_taken, self._error) elif self._failure: info = _TestInfo.create_failure(test, time_taken, self._failure) else: info = _TestInfo.create_success(test, time_taken) self._tests.append(info) def addError(self, test, err): unittest.TestResult.addError(self, test, err) self._error = err def addFailure(self, test, err): unittest.TestResult.addFailure(self, test, err) self._failure = err def print_report(self, stream, time_taken, out, err): """Prints the XML report to the supplied stream. 
The time the tests took to perform as well as the captured standard output and standard error streams must be passed in.a """ stream.write('<testsuite errors="%(e)d" failures="%(f)d" ' % \ { "e": len(self.errors), "f": len(self.failures) }) stream.write('name="%(n)s" tests="%(t)d" time="%(time).3f">\n' % \ { "n": self._test_name, "t": self.testsRun, "time": time_taken, }) for info in self._tests: info.print_report(stream) stream.write(' <system-out><![CDATA[%s]]></system-out>\n' % out) stream.write(' <system-err><![CDATA[%s]]></system-err>\n' % err) stream.write('</testsuite>\n') class XMLTestRunner(object): """A test runner that stores results in XML format compatible with JUnit. XMLTestRunner(stream=None) -> XML test runner The XML file is written to the supplied stream. If stream is None, the results are stored in a file called TEST-<module>.<class>.xml in the current working directory (if not overridden with the path property), where <module> and <class> are the module and class name of the test class. """ def __init__(self, stream=None): self._stream = stream self._path = "." def run(self, test): """Run the given test case or test suite.""" class_ = test.__class__ classname = class_.__module__ + "." 
+ class_.__name__ if self._stream == None: filename = "TEST-%s.xml" % classname stream = file(os.path.join(self._path, filename), "w") stream.write('<?xml version="1.0" encoding="utf-8"?>\n') else: stream = self._stream result = _XMLTestResult(classname) start_time = time.time() # TODO: Python 2.5: Use the with statement old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = StringIO() sys.stderr = StringIO() try: test(result) try: out_s = sys.stdout.getvalue() except AttributeError: out_s = "" try: err_s = sys.stderr.getvalue() except AttributeError: err_s = "" finally: sys.stdout = old_stdout sys.stderr = old_stderr time_taken = time.time() - start_time result.print_report(stream, time_taken, out_s, err_s) if self._stream == None: stream.close() return result def _set_path(self, path): self._path = path path = property(lambda self: self._path, _set_path, None, """The path where the XML files are stored. This property is ignored when the XML file is written to a file stream.""") class XMLTestRunnerTest(unittest.TestCase): def setUp(self): self._stream = StringIO() def _try_test_run(self, test_class, expected): """Run the test suite against the supplied test class and compare the XML result against the expected XML string. Fail if the expected string doesn't match the actual string. All time attribute in the expected string should have the value "0.000". All error and failure messages are reduced to "Foobar". """ runner = XMLTestRunner(self._stream) runner.run(unittest.makeSuite(test_class)) got = self._stream.getvalue() # Replace all time="X.YYY" attributes by time="0.000" to enable a # simple string comparison. got = re.sub(r'time="\d+\.\d+"', 'time="0.000"', got) # Likewise, replace all failure and error messages by a simple "Foobar" # string. 
got = re.sub(r'(?s)<failure (.*?)>.*?</failure>', r'<failure \1>Foobar</failure>', got) got = re.sub(r'(?s)<error (.*?)>.*?</error>', r'<error \1>Foobar</error>', got) self.assertEqual(expected, got) def test_no_tests(self): """Regression test: Check whether a test run without any tests matches a previous run. """ class TestTest(unittest.TestCase): pass self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="0" time="0.000"> <system-out><![CDATA[]]></system-out> <system-err><![CDATA[]]></system-err> </testsuite> """) def test_success(self): """Regression test: Check whether a test run with a successful test matches a previous run. """ class TestTest(unittest.TestCase): def test_foo(self): pass self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000"> <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase> <system-out><![CDATA[]]></system-out> <system-err><![CDATA[]]></system-err> </testsuite> """) def test_failure(self): """Regression test: Check whether a test run with a failing test matches a previous run. """ class TestTest(unittest.TestCase): def test_foo(self): self.assert_(False) self._try_test_run(TestTest, """<testsuite errors="0" failures="1" name="unittest.TestSuite" tests="1" time="0.000"> <testcase classname="__main__.TestTest" name="test_foo" time="0.000"> <failure type="exceptions.AssertionError">Foobar</failure> </testcase> <system-out><![CDATA[]]></system-out> <system-err><![CDATA[]]></system-err> </testsuite> """) def test_error(self): """Regression test: Check whether a test run with a erroneous test matches a previous run. 
""" class TestTest(unittest.TestCase): def test_foo(self): raise IndexError() self._try_test_run(TestTest, """<testsuite errors="1" failures="0" name="unittest.TestSuite" tests="1" time="0.000"> <testcase classname="__main__.TestTest" name="test_foo" time="0.000"> <error type="exceptions.IndexError">Foobar</error> </testcase> <system-out><![CDATA[]]></system-out> <system-err><![CDATA[]]></system-err> </testsuite> """) def test_stdout_capture(self): """Regression test: Check whether a test run with output to stdout matches a previous run. """ class TestTest(unittest.TestCase): def test_foo(self): print "Test" self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000"> <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase> <system-out><![CDATA[Test ]]></system-out> <system-err><![CDATA[]]></system-err> </testsuite> """) def test_stderr_capture(self): """Regression test: Check whether a test run with output to stderr matches a previous run. """ class TestTest(unittest.TestCase): def test_foo(self): print >>sys.stderr, "Test" self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000"> <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase> <system-out><![CDATA[]]></system-out> <system-err><![CDATA[Test ]]></system-err> </testsuite> """) class NullStream(object): """A file-like object that discards everything written to it.""" def write(self, buffer): pass def test_unittests_changing_stdout(self): """Check whether the XMLTestRunner recovers gracefully from unit tests that change stdout, but don't change it back properly. 
""" class TestTest(unittest.TestCase): def test_foo(self): sys.stdout = XMLTestRunnerTest.NullStream() runner = XMLTestRunner(self._stream) runner.run(unittest.makeSuite(TestTest)) def test_unittests_changing_stderr(self): """Check whether the XMLTestRunner recovers gracefully from unit tests that change stderr, but don't change it back properly. """ class TestTest(unittest.TestCase): def test_foo(self): sys.stderr = XMLTestRunnerTest.NullStream() runner = XMLTestRunner(self._stream) runner.run(unittest.makeSuite(TestTest)) class XMLTestProgram(unittest.TestProgram): def runTests(self): if self.testRunner is None: self.testRunner = XMLTestRunner() unittest.TestProgram.runTests(self) main = XMLTestProgram if __name__ == "__main__": main(module=None)
# Copyright (c) 2013 Dell Inc.
# Copyright 2013 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Volume driver for Dell EqualLogic Storage.

The driver talks to the array's Group Manager CLI over SSH: it opens an
interactive shell, sends commands, and scrapes the textual output up to the
CLI prompt ('<eqlx_group_name>> ').
"""

import functools
import math
import random

import eventlet
from eventlet import greenthread
import greenlet
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import excutils
from six.moves import range

from cinder import exception
from cinder.i18n import _, _LE, _LW, _LI
from cinder import interface
from cinder import ssh_utils
from cinder import utils
from cinder.volume.drivers import san

LOG = logging.getLogger(__name__)

# Driver-specific configuration options; several are deprecated shims kept
# for backward compatibility (see __init__ where they are mapped onto the
# generic san/driver options).
eqlx_opts = [
    cfg.StrOpt('eqlx_group_name',
               default='group-0',
               help='Group name to use for creating volumes. Defaults to '
                    '"group-0".'),
    cfg.IntOpt('eqlx_cli_timeout',
               default=30,
               help='Timeout for the Group Manager cli command execution. '
                    'Default is 30. Note that this option is deprecated '
                    'in favour of "ssh_conn_timeout" as '
                    'specified in cinder/volume/drivers/san/san.py '
                    'and will be removed in M release.'),
    cfg.IntOpt('eqlx_cli_max_retries',
               default=5,
               help='Maximum retry count for reconnection. Default is 5.'),
    cfg.BoolOpt('eqlx_use_chap',
                default=False,
                help='Use CHAP authentication for targets. Note that this '
                     'option is deprecated in favour of "use_chap_auth" as '
                     'specified in cinder/volume/driver.py and will be '
                     'removed in next release.'),
    cfg.StrOpt('eqlx_chap_login',
               default='admin',
               help='Existing CHAP account name. Note that this '
                    'option is deprecated in favour of "chap_username" as '
                    'specified in cinder/volume/driver.py and will be '
                    'removed in next release.'),
    cfg.StrOpt('eqlx_chap_password',
               default='password',
               help='Password for specified CHAP account name. Note that this '
                    'option is deprecated in favour of "chap_password" as '
                    'specified in cinder/volume/driver.py and will be '
                    'removed in the next release',
               secret=True),
    cfg.StrOpt('eqlx_pool',
               default='default',
               help='Pool in which volumes will be created. Defaults '
                    'to "default".')
]

CONF = cfg.CONF
CONF.register_opts(eqlx_opts)


def with_timeout(f):
    """Decorator: run ``f`` in a greenthread, honoring a ``timeout=`` kwarg.

    The 'timeout' keyword is popped before calling ``f``.  If the timeout
    fires first, the worker greenthread is killed (raising GreenletExit
    inside it) and VolumeBackendAPIException is raised; otherwise the kill
    timer is cancelled and the result returned.
    """
    @functools.wraps(f)
    def __inner(self, *args, **kwargs):
        timeout = kwargs.pop('timeout', None)
        gt = eventlet.spawn(f, self, *args, **kwargs)
        if timeout is None:
            return gt.wait()
        else:
            kill_thread = eventlet.spawn_after(timeout, gt.kill)
            try:
                res = gt.wait()
            except greenlet.GreenletExit:
                raise exception.VolumeBackendAPIException(
                    data="Command timed out")
            else:
                kill_thread.cancel()
                return res

    return __inner


@interface.volumedriver
class DellEQLSanISCSIDriver(san.SanISCSIDriver):
    """Implements commands for Dell EqualLogic SAN ISCSI management.

    To enable the driver add the following line to the cinder configuration:
        volume_driver=cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver

    Driver's prerequisites are:
        - a separate volume group set up and running on the SAN
        - SSH access to the SAN
        - a special user must be created which must be able to
            - create/delete volumes and snapshots;
            - clone snapshots into volumes;
            - modify volume access records;

    The access credentials to the SAN are provided by means of the following
    flags:

    .. code-block:: ini

        san_ip=<ip_address>
        san_login=<user name>
        san_password=<user password>
        san_private_key=<file containing SSH private key>

    Thin provision of volumes is enabled by default, to disable it use:

    .. code-block:: ini

        san_thin_provision=false

    In order to use target CHAP authentication (which is disabled by default)
    SAN administrator must create a local CHAP user and specify the following
    flags for the driver:

    .. code-block:: ini

        use_chap_auth=True
        chap_login=<chap_login>
        chap_password=<chap_password>

    eqlx_group_name parameter actually represents the CLI prompt message
    without '>' ending. E.g. if prompt looks like 'group-0>', then the
    parameter must be set to 'group-0'

    Version history:

    .. code-block:: none

        1.0   - Initial driver
        1.1.0 - Misc fixes
        1.2.0 - Deprecated eqlx_cli_timeout infavor of ssh_conn_timeout
        1.3.0 - Added support for manage/unmanage volume

    """

    VERSION = "1.3.0"

    def __init__(self, *args, **kwargs):
        super(DellEQLSanISCSIDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(eqlx_opts)
        # Group IP is discovered in do_setup() from 'grpparams show'.
        self._group_ip = None
        self.sshpool = None

        # Map the deprecated eqlx_* CHAP options onto the generic driver
        # options so the rest of the code only reads the generic ones.
        if self.configuration.eqlx_use_chap is True:
            LOG.warning(_LW(
                'Configuration options eqlx_use_chap, '
                'eqlx_chap_login and eqlx_chap_password are deprecated. Use '
                'use_chap_auth, chap_username and chap_password '
                'respectively for the same.'))

            self.configuration.use_chap_auth = (
                self.configuration.eqlx_use_chap)
            self.configuration.chap_username = (
                self.configuration.eqlx_chap_login)
            self.configuration.chap_password = (
                self.configuration.eqlx_chap_password)

        # Same for the deprecated CLI timeout option.
        if self.configuration.eqlx_cli_timeout:
            msg = _LW('Configuration option eqlx_cli_timeout '
                      'is deprecated and will be removed in M release. '
                      'Use ssh_conn_timeout instead.')
            self.configuration.ssh_conn_timeout = (
                self.configuration.eqlx_cli_timeout)
            versionutils.report_deprecated_feature(LOG, msg)

    def _get_output(self, chan):
        """Read from the SSH channel until the CLI prompt appears.

        Returns the accumulated output split into lines.  Raises
        VolumeBackendAPIException if the array closes the connection.
        """
        out = ''
        ending = '%s> ' % self.configuration.eqlx_group_name
        while out.find(ending) == -1:
            ret = chan.recv(102400)
            if len(ret) == 0:
                # According to paramiko.channel.Channel documentation, which
                # says "If a string of length zero is returned, the channel
                # stream has closed". So we can confirm that the EQL server
                # has closed the connection.
                msg = _("The EQL array has closed the connection.")
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
            out += ret

        LOG.debug("CLI output\n%s", out)
        return out.splitlines()

    def _get_prefixed_value(self, lines, prefix):
        """Return the remainder of the first line starting with ``prefix``,
        or None when no line matches."""
        for line in lines:
            if line.startswith(prefix):
                return line[len(prefix):]
        return

    @with_timeout
    def _ssh_execute(self, ssh, command, *arg, **kwargs):
        """Run one CLI command over an interactive SSH shell.

        Opens a fresh shell session, drains the MOTD, widens the terminal
        (so long lines are not wrapped by the CLI pager), sends the command
        and returns its output lines.  Raises ProcessExecutionError when the
        CLI reports an error.  Wrapped by @with_timeout, so a 'timeout='
        kwarg kills the session if it hangs.
        """
        transport = ssh.get_transport()
        chan = transport.open_session()
        completed = False

        try:
            chan.invoke_shell()

            LOG.debug("Reading CLI MOTD")
            self._get_output(chan)

            cmd = 'stty columns 255'
            LOG.debug("Setting CLI terminal width: '%s'", cmd)
            chan.send(cmd + '\r')
            out = self._get_output(chan)

            LOG.debug("Sending CLI command: '%s'", command)
            chan.send(command + '\r')
            out = self._get_output(chan)

            completed = True

            if any(ln.startswith(('% Error', 'Error:')) for ln in out):
                desc = _("Error executing EQL command")
                cmdout = '\n'.join(out)
                LOG.error(_LE("%s"), cmdout)
                raise processutils.ProcessExecutionError(
                    stdout=cmdout, cmd=command, description=desc)
            return out

        finally:
            if not completed:
                LOG.debug("Timed out executing command: '%s'", command)
            chan.close()

    def _run_ssh(self, cmd_list, attempts=1):
        """Join ``cmd_list`` into one CLI command and run it via the SSH
        connection pool, retrying up to ``attempts`` times with a random
        back-off between tries."""
        utils.check_ssh_injection(cmd_list)
        command = ' '.join(cmd_list)

        if not self.sshpool:
            # Lazily create the pool on first use.
            password = self.configuration.san_password
            privatekey = self.configuration.san_private_key
            min_size = self.configuration.ssh_min_pool_conn
            max_size = self.configuration.ssh_max_pool_conn
            self.sshpool = ssh_utils.SSHPool(
                self.configuration.san_ip,
                self.configuration.san_ssh_port,
                self.configuration.ssh_conn_timeout,
                self.configuration.san_login,
                password=password,
                privatekey=privatekey,
                min_size=min_size,
                max_size=max_size)
        try:
            total_attempts = attempts
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        LOG.info(_LI('EQL-driver: executing "%s".'), command)
                        return self._ssh_execute(
                            ssh, command,
                            timeout=self.configuration.ssh_conn_timeout)
                    except Exception:
                        LOG.exception(_LE('Error running command.'))
                        # Random 0.2-5s sleep before retrying.
                        greenthread.sleep(random.randint(20, 500) / 100.0)
                msg = (_("SSH Command failed after '%(total_attempts)r' "
                         "attempts : '%(command)s'") %
                       {'total_attempts': total_attempts - attempts,
                        'command': command})
                raise exception.VolumeBackendAPIException(data=msg)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Error running SSH command: "%s".'), command)

    def check_for_setup_error(self):
        super(DellEQLSanISCSIDriver, self).check_for_setup_error()
        if self.configuration.eqlx_cli_max_retries < 0:
            raise exception.InvalidInput(
                reason=_("eqlx_cli_max_retries must be greater than or "
                         "equal to 0"))

    def _eql_execute(self, *args, **kwargs):
        # +1: eqlx_cli_max_retries counts *re*-tries on top of the first try.
        return self._run_ssh(
            args, attempts=self.configuration.eqlx_cli_max_retries + 1)

    def _get_volume_data(self, lines):
        """Extract the iSCSI target name from CLI output and build the
        model update dict.  [:-1] drops the trailing '.' the CLI prints."""
        prefix = 'iSCSI target name is '
        target_name = self._get_prefixed_value(lines, prefix)[:-1]
        return self._get_model_update(target_name)

    def _get_model_update(self, target_name):
        # provider_location format: '<ip>:<port>,<tpgt> <iqn> <lun>'.
        lun_id = "%s:%s,1 %s 0" % (self._group_ip, '3260', target_name)
        model_update = {}
        model_update['provider_location'] = lun_id
        if self.configuration.use_chap_auth:
            model_update['provider_auth'] = 'CHAP %s %s' % \
                (self.configuration.chap_username,
                 self.configuration.chap_password)
        return model_update

    def _get_space_in_gb(self, val):
        """Convert a CLI size string like '12.5MB'/'3TB'/'7GB' to whole GB,
        rounding up."""
        scale = 1.0
        part = 'GB'
        if val.endswith('MB'):
            scale = 1.0 / 1024
            part = 'MB'
        elif val.endswith('TB'):
            scale = 1.0 * 1024
            part = 'TB'
        return math.ceil(scale * float(val.partition(part)[0]))

    def _update_volume_stats(self):
        """Retrieve stats info from eqlx group."""

        LOG.debug('Updating volume stats.')
        data = {}
        backend_name = "eqlx"
        if self.configuration:
            backend_name = self.configuration.safe_get('volume_backend_name')
        data["volume_backend_name"] = backend_name or 'eqlx'
        data["vendor_name"] = 'Dell'
        data["driver_version"] = self.VERSION
        data["storage_protocol"] = 'iSCSI'

        data['reserved_percentage'] = 0
        data['QoS_support'] = False

        data['total_capacity_gb'] = 0
        data['free_capacity_gb'] = 0
        data['multiattach'] = True

        provisioned_capacity = 0

        # Scrape capacity figures from 'pool select <pool> show'.
        for line in self._eql_execute('pool', 'select',
                                      self.configuration.eqlx_pool, 'show'):
            if line.startswith('TotalCapacity:'):
                out_tup = line.rstrip().partition(' ')
                data['total_capacity_gb'] = self._get_space_in_gb(out_tup[-1])
            if line.startswith('FreeSpace:'):
                out_tup = line.rstrip().partition(' ')
                data['free_capacity_gb'] = self._get_space_in_gb(out_tup[-1])
            if line.startswith('VolumeReserve:'):
                out_tup = line.rstrip().partition(' ')
                provisioned_capacity = self._get_space_in_gb(out_tup[-1])

        global_capacity = data['total_capacity_gb']
        global_free = data['free_capacity_gb']

        thin_enabled = self.configuration.san_thin_provision
        if not thin_enabled:
            provisioned_capacity = round(global_capacity - global_free, 2)

        data['provisioned_capacity_gb'] = provisioned_capacity
        data['max_over_subscription_ratio'] = (
            self.configuration.max_over_subscription_ratio)
        data['thin_provisioning_support'] = thin_enabled
        data['thick_provisioning_support'] = not thin_enabled

        self._stats = data

    def _get_volume_info(self, volume_name):
        """Get the volume details on the array"""
        command = ['volume', 'select', volume_name, 'show']
        try:
            data = {}
            for line in self._eql_execute(*command):
                if line.startswith('Size:'):
                    out_tup = line.rstrip().partition(' ')
                    data['size'] = self._get_space_in_gb(out_tup[-1])
                elif line.startswith('iSCSI Name:'):
                    out_tup = line.rstrip().partition(': ')
                    data['iSCSI_Name'] = out_tup[-1]
            return data
        except processutils.ProcessExecutionError:
            msg = (_("Volume does not exists %s.") % volume_name)
            LOG.error(msg)
            raise exception.ManageExistingInvalidReference(
                existing_ref=volume_name, reason=msg)

    def _check_volume(self, volume):
        """Check if the volume exists on the Array."""
        command = ['volume', 'select', volume['name'], 'show']
        try:
            self._eql_execute(*command)
        except processutils.ProcessExecutionError as err:
            with excutils.save_and_reraise_exception():
                if err.stdout.find('does not exist.\n') > -1:
                    LOG.debug('Volume %s does not exist, '
                              'it may have already been deleted',
                              volume['name'])
                    raise exception.VolumeNotFound(volume_id=volume['id'])

    def _parse_connection(self, connector, out):
        """Returns the correct connection id for the initiator.

        This parses the cli output from the command
        'volume select <volumename> access show'
        and returns the correct connection id.
        """
        lines = [line for line in out if line != '']
        # Every record has 2 lines
        for i in range(0, len(lines), 2):
            try:
                int(lines[i][0])
                # sanity check
                if len(lines[i + 1].split()) == 1:
                    # IQN is wrapped: first half on record line, rest on the
                    # continuation line.
                    check = lines[i].split()[1] + lines[i + 1].strip()
                    if connector['initiator'] == check:
                        return lines[i].split()[0]
            except (IndexError, ValueError):
                pass  # skip the line that is not a valid access record

        return None

    def do_setup(self, context):
        """Disable cli confirmation and tune output format."""
        try:
            disabled_cli_features = ('confirmation', 'paging', 'events',
                                     'formatoutput')
            for feature in disabled_cli_features:
                self._eql_execute('cli-settings', feature, 'off')

            for line in self._eql_execute('grpparams', 'show'):
                if line.startswith('Group-Ipaddress:'):
                    out_tup = line.rstrip().partition(' ')
                    self._group_ip = out_tup[-1]

            LOG.info(_LI('EQL-driver: Setup is complete, group IP is "%s".'),
                     self._group_ip)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to setup the Dell EqualLogic driver.'))

    def create_volume(self, volume):
        """Create a volume."""
        try:
            cmd = ['volume', 'create',
                   volume['name'], "%sG" % (volume['size'])]
            if self.configuration.eqlx_pool != 'default':
                cmd.append('pool')
                cmd.append(self.configuration.eqlx_pool)
            if self.configuration.san_thin_provision:
                cmd.append('thin-provision')
            out = self._eql_execute(*cmd)
            self.add_multihost_access(volume)
            return self._get_volume_data(out)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to create volume "%s".'),
                          volume['name'])

    def add_multihost_access(self, volume):
        """Add multihost-access to a volume. Needed for live migration."""
        try:
            cmd = ['volume', 'select',
                   volume['name'], 'multihost-access', 'enable']
            self._eql_execute(*cmd)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to add multihost-access '
                              'for volume "%s".'),
                          volume['name'])

    def _set_volume_description(self, volume, description):
        """Set the description of the volume"""
        try:
            cmd = ['volume', 'select',
                   volume['name'], 'description', description]
            self._eql_execute(*cmd)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to set description '
                              'for volume "%s".'),
                          volume['name'])

    def delete_volume(self, volume):
        """Delete a volume."""
        try:
            self._check_volume(volume)
            # Must be taken offline before the array allows deletion.
            self._eql_execute('volume', 'select', volume['name'], 'offline')
            self._eql_execute('volume', 'delete', volume['name'])
        except exception.VolumeNotFound:
            LOG.warning(_LW('Volume %s was not found while trying to delete '
                            'it.'), volume['name'])
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to delete '
                              'volume "%s".'), volume['name'])

    def create_snapshot(self, snapshot):
        """Create snapshot of existing volume on appliance."""
        try:
            out = self._eql_execute('volume', 'select',
                                    snapshot['volume_name'],
                                    'snapshot', 'create-now')
            prefix = 'Snapshot name is '
            snap_name = self._get_prefixed_value(out, prefix)
            # The array picks its own name; rename to the Cinder one.
            self._eql_execute('volume', 'select', snapshot['volume_name'],
                              'snapshot', 'rename', snap_name,
                              snapshot['name'])
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to create snapshot of volume "%s".'),
                          snapshot['volume_name'])

    def create_volume_from_snapshot(self, volume, snapshot):
        """Create new volume from other volume's snapshot on appliance."""
        try:
            out = self._eql_execute('volume', 'select',
                                    snapshot['volume_name'], 'snapshot',
                                    'select', snapshot['name'],
                                    'clone', volume['name'])
            # Extend Volume if needed
            if out and volume['size'] > snapshot['volume_size']:
                self.extend_volume(volume, volume['size'])
                LOG.debug('Volume from snapshot %(name)s resized from '
                          '%(current_size)sGB to %(new_size)sGB.',
                          {'name': volume['name'],
                           'current_size': snapshot['volume_size'],
                           'new_size': volume['size']})

            self.add_multihost_access(volume)
            return self._get_volume_data(out)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to create volume from snapshot "%s".'),
                          snapshot['name'])

    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        try:
            src_volume_name = src_vref['name']
            out = self._eql_execute('volume', 'select', src_volume_name,
                                    'clone', volume['name'])

            # Extend Volume if needed
            if out and volume['size'] > src_vref['size']:
                self.extend_volume(volume, volume['size'])

            self.add_multihost_access(volume)
            return self._get_volume_data(out)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to create clone of volume "%s".'),
                          volume['name'])

    def delete_snapshot(self, snapshot):
        """Delete volume's snapshot."""
        try:
            self._eql_execute('volume', 'select', snapshot['volume_name'],
                              'snapshot', 'delete', snapshot['name'])
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to delete snapshot %(snap)s of '
                              'volume %(vol)s.'),
                          {'snap': snapshot['name'],
                           'vol': snapshot['volume_name']})

    def initialize_connection(self, volume, connector):
        """Restrict access to a volume."""
        try:
            cmd = ['volume', 'select', volume['name'], 'access', 'create',
                   'initiator', connector['initiator']]
            if self.configuration.use_chap_auth:
                cmd.extend(['authmethod', 'chap', 'username',
                            self.configuration.chap_username])
            self._eql_execute(*cmd)
            iscsi_properties = self._get_iscsi_properties(volume)
            return {
                'driver_volume_type': 'iscsi',
                'data': iscsi_properties
            }
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to initialize connection '
                              'to volume "%s".'),
                          volume['name'])

    def terminate_connection(self, volume, connector, force=False, **kwargs):
        """Remove access restrictions from a volume."""
        try:
            out = self._eql_execute('volume', 'select', volume['name'],
                                    'access', 'show')
            connection_id = self._parse_connection(connector, out)
            if connection_id is not None:
                self._eql_execute('volume', 'select', volume['name'],
                                  'access', 'delete', connection_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to terminate connection '
                              'to volume "%s".'),
                          volume['name'])

    def create_export(self, context, volume, connector):
        """Create an export of a volume.

        Driver has nothing to do here for the volume has been exported
        already by the SAN, right after it's creation.
        """
        pass

    def ensure_export(self, context, volume):
        """Ensure an export of a volume.

        Driver has nothing to do here for the volume has been exported
        already by the SAN, right after it's creation. We will just make
        sure that the volume exists on the array and issue a warning.
        """
        try:
            self._check_volume(volume)
        except exception.VolumeNotFound:
            LOG.warning(_LW('Volume %s is not found!, it may have been '
                            'deleted.'), volume['name'])
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to ensure export of volume "%s".'),
                          volume['name'])

    def remove_export(self, context, volume):
        """Remove an export of a volume.

        Driver has nothing to do here for the volume has been exported
        already by the SAN, right after it's creation.
        Nothing to remove since there's nothing exported.
        """
        pass

    def extend_volume(self, volume, new_size):
        """Extend the size of the volume."""
        try:
            self._eql_execute('volume', 'select', volume['name'],
                              'size', "%sG" % new_size)
            LOG.info(_LI('Volume %(name)s resized from '
                         '%(current_size)sGB to %(new_size)sGB.'),
                     {'name': volume['name'],
                      'current_size': volume['size'],
                      'new_size': new_size})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to extend_volume %(name)s from '
                              '%(current_size)sGB to %(new_size)sGB.'),
                          {'name': volume['name'],
                           'current_size': volume['size'],
                           'new_size': new_size})

    def _get_existing_volume_ref_name(self, ref):
        """Return the backend volume name from a manage_existing ref dict;
        raises InvalidInput when neither source-name nor source-id given."""
        existing_volume_name = None
        if 'source-name' in ref:
            existing_volume_name = ref['source-name']
        elif 'source-id' in ref:
            existing_volume_name = ref['source-id']
        else:
            msg = _('Reference must contain source-id or source-name.')
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)
        return existing_volume_name

    def manage_existing(self, volume, existing_ref):
        """Manage an existing volume on the backend storage."""
        existing_volume_name = self._get_existing_volume_ref_name(existing_ref)
        try:
            cmd = ['volume', 'rename',
                   existing_volume_name, volume['name']]
            self._eql_execute(*cmd)
            self._set_volume_description(volume, '"OpenStack Managed"')
            self.add_multihost_access(volume)
            data = self._get_volume_info(volume['name'])
            updates = self._get_model_update(data['iSCSI_Name'])
            LOG.info(_LI("Backend volume %(back_vol)s renamed to "
                         "%(vol)s and is now managed by cinder."),
                     {'back_vol': existing_volume_name,
                      'vol': volume['name']})
            return updates
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to manage volume "%s".'),
                          volume['name'])

    def manage_existing_get_size(self, volume, existing_ref):
        """Return size of volume to be managed by manage_existing.

        When calculating the size, round up to the next GB.

        :param volume: Cinder volume to manage
        :param existing_ref: Driver-specific information used to identify a
                             volume
        """
        existing_volume_name = self._get_existing_volume_ref_name(existing_ref)
        data = self._get_volume_info(existing_volume_name)
        return data['size']

    def unmanage(self, volume):
        """Removes the specified volume from Cinder management.

        Does not delete the underlying backend storage object.

        :param volume: Cinder volume to unmanage
        """
        try:
            self._set_volume_description(volume, '"OpenStack UnManaged"')
            LOG.info(_LI("Virtual volume %(disp)s '%(vol)s' is no "
                         "longer managed."),
                     {'disp': volume['display_name'],
                      'vol': volume['name']})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to unmanage volume "%s".'),
                          volume['name'])

    def local_path(self, volume):
        # Volumes are served over iSCSI; there is no local block device path.
        raise NotImplementedError()
#!/usr/bin/env python3
# Based on code created for Udacity Linear Algebra Refresher course

import math


class Vector(object):
    """An n-dimensional vector supporting basic linear-algebra operations.

    Coordinates are stored as an immutable tuple. Error conditions are
    signalled with the message constants below; callers in this module
    compare ``str(e)`` against them, so the strings must not change.
    """

    SAME_DIMENSION_ERROR = "Vectors must be same dimension"
    CANNOT_NORMALIZE_ZERO_VECTOR_MSG = "Unable to normalize zero vector"
    CANNOT_FIND_ANGLE_BETWEEN_VECTORS = "Unable to find angle - at least one vector is Zero Vector"
    NO_UNIQUE_PARALLEL_COMPONENT_MSG = "Unable to calculate a unique parallel component"
    NO_UNIQUE_ORTHOGONAL_COMPONENT_MSG = "Unable to calculate a unique orthogonal component"
    CAN_ONLY_CALCULATE_FOR_2D_OR_3D_VECTORS_MSG = "Function only defined for 2D and 3D vectors"

    def __init__(self, coordinates):
        """Build a vector from any non-empty iterable of numbers.

        Raises:
            ValueError: if ``coordinates`` is empty.
            TypeError: if ``coordinates`` is not iterable.
        """
        try:
            if not coordinates:
                raise ValueError
            self.coordinates = tuple(coordinates)
            self.dimension = len(coordinates)
        except ValueError:
            raise ValueError("The coordinates must be non-empty")
        except TypeError:
            raise TypeError("The coordinates must be an iterable")

    def __str__(self):
        # How Vector item should print
        return "Vector: {}".format(self.coordinates)

    def __eq__(self, v):
        # Two vectors are equal when their coordinate tuples are equal.
        return self.coordinates == v.coordinates

    def __add__(self, v):
        """Component-wise addition; both vectors must share a dimension."""
        if v.dimension != self.dimension:
            raise ValueError(self.SAME_DIMENSION_ERROR)
        return Vector([x + y for x, y in zip(self.coordinates, v.coordinates)])

    def __sub__(self, v):
        """Component-wise subtraction; both vectors must share a dimension."""
        if v.dimension != self.dimension:
            raise ValueError(self.SAME_DIMENSION_ERROR)
        return Vector([x - y for x, y in zip(self.coordinates, v.coordinates)])

    def __iter__(self):
        # Yield coordinates in order so the vector is directly iterable.
        i = 0
        while i < self.dimension:
            yield self.coordinates[i]
            i += 1

    def scale(self, c):
        # Returns new vector where each item in self is scaled by c
        scaled = [c * x for x in self.coordinates]
        return Vector(scaled)

    def magnitude(self):
        # Returns the length of self (Euclidean norm)
        squares = [x ** 2 for x in self.coordinates]
        return math.sqrt(sum(squares))

    def normalization(self):
        """Return the unit vector in the same direction as self.

        Raises:
            Exception: with CANNOT_NORMALIZE_ZERO_VECTOR_MSG for the
                zero vector (magnitude 0).
        """
        try:
            mag = self.magnitude()
            return self.scale(1.0 / mag)
        except ZeroDivisionError:
            raise Exception(self.CANNOT_NORMALIZE_ZERO_VECTOR_MSG)

    def dot_product(self, v):
        # Returns the inner or dot product of [a1, a2] and [b1, b2] => a1*b1 + a2*b2
        product = [x * y for x, y in zip(self.coordinates, v.coordinates)]
        return sum(product)

    def angle(self, v, in_degrees=False, tolerance=1e-10):
        """Return the angle between self and v, in radians or degrees.

        The normalized dot product is clamped to [-1, 1] within
        ``tolerance`` so float rounding cannot push acos out of domain.

        Raises:
            Exception: with CANNOT_FIND_ANGLE_BETWEEN_VECTORS if either
                vector is the zero vector.
        """
        try:
            u1 = self.normalization()
            u2 = v.normalization()
            dot_prod = u1.dot_product(u2)
            if abs(dot_prod - 1) < tolerance:
                # Very close to 1
                dot_prod = 1
            elif abs(dot_prod + 1) < tolerance:
                # Very close to -1
                dot_prod = -1
            radians = math.acos(dot_prod)
            if in_degrees:
                return math.degrees(radians)
            else:
                return radians
        except Exception as e:
            if str(e) == self.CANNOT_NORMALIZE_ZERO_VECTOR_MSG:
                raise Exception(self.CANNOT_FIND_ANGLE_BETWEEN_VECTORS)
            else:
                raise e

    def is_zero(self, tolerance=1e-10):
        # Helper function to check if a vector is the Zero Vector
        return self.magnitude() < tolerance

    def is_parallel(self, v, tolerance=1e-10):
        """Return True when v is parallel (or anti-parallel) to self.

        The zero vector counts as parallel to everything. FIX: the angle
        is now computed once instead of twice (the original evaluated
        ``self.angle(v)`` in both comparisons).
        """
        if self.is_zero() or v.is_zero():
            return True
        theta = self.angle(v)
        return theta < tolerance or abs(theta - math.pi) < tolerance

    def is_orthogonal(self, v, tolerance=1e-10):
        # Returns Boolean whether vector v is orthogonal to self
        return abs(self.dot_product(v)) < tolerance

    def component_parallel_to(self, v):
        """Return the projection of v onto self.

        Raises:
            Exception: with NO_UNIQUE_PARALLEL_COMPONENT_MSG when self is
                the zero vector (no unique projection direction).
        """
        try:
            unit = self.normalization()
            weight = v.dot_product(unit)
            return unit.scale(weight)
        except Exception as e:
            if str(e) == self.CANNOT_NORMALIZE_ZERO_VECTOR_MSG:
                raise Exception(self.NO_UNIQUE_PARALLEL_COMPONENT_MSG)
            else:
                raise e

    def component_orthogonal_to(self, v):
        """Return the component of v orthogonal to self (v minus projection)."""
        try:
            projection = self.component_parallel_to(v)
            return v - projection
        except Exception as e:
            if str(e) == self.NO_UNIQUE_PARALLEL_COMPONENT_MSG:
                raise Exception(self.NO_UNIQUE_ORTHOGONAL_COMPONENT_MSG)
            else:
                raise e

    def cross_product(self, v):
        """Return the cross product of self and v.

        Defined for 3D vectors; 2D vectors are embedded in R3 with a zero
        z-component first. FIX: the original dispatched on the exact text
        of CPython's ValueError unpacking messages, which differs between
        interpreter versions; this version checks dimensions explicitly
        while raising the same exception for unsupported dimensions.
        """
        if self.dimension == 2 and v.dimension == 2:
            self_embedded_in_R3 = Vector(self.coordinates + (0.0,))
            v_embedded_in_R3 = Vector(v.coordinates + (0.0,))
            return self_embedded_in_R3.cross_product(v_embedded_in_R3)
        if self.dimension != 3 or v.dimension != 3:
            raise Exception(self.CAN_ONLY_CALCULATE_FOR_2D_OR_3D_VECTORS_MSG)
        x1, y1, z1 = self.coordinates
        x2, y2, z2 = v.coordinates
        cross = [
            y1 * z2 - y2 * z1,
            -(x1 * z2 - x2 * z1),
            x1 * y2 - x2 * y1
        ]
        return Vector(cross)

    def area_of_parallelogram_with(self, v):
        # Returns area of parallelogram spanned by self and v
        cross_vector = self.cross_product(v)
        return cross_vector.magnitude()

    def area_of_triangle_with(self, v):
        # Returns area of triangle spanned by self and v
        return 0.5 * self.area_of_parallelogram_with(v)
from flask import session, redirect, request
from webViews.view import normalView
from webViews.dockletrequest import dockletRequest
from webViews.dashboard import *
from webViews.checkname import checkname
import time, re

# NOTE(review): all views below use @classmethod with a first argument
# named 'self' (so 'self' is the class object), and read attributes such
# as self.image / self.clustername that are attached externally before
# dispatch -- presumably by the URL router; verify against the caller.


class addClusterView(normalView):
    """Render the "create cluster" form with the user's remaining quota."""
    template_path = "addCluster.html"

    @classmethod
    def get(self):
        result = dockletRequest.post("/image/list/")
        images = result.get("images")
        result = dockletRequest.post("/user/usageQuery/")
        quota = result.get("quota")
        usage = result.get("usage")
        default = result.get("default")
        # Remaining allocation = quota minus current usage, per resource.
        restcpu = int(quota['cpu']) - int(usage['cpu'])
        restmemory = int(quota['memory']) - int(usage['memory'])
        restdisk = int(quota['disk']) - int(usage['disk'])
        # Suggested form default: the system default, clamped into
        # [0, remaining] and rendered as a string for the template.
        if restcpu >= int(default['cpu']):
            defaultcpu = default['cpu']
        elif restcpu <= 0:
            defaultcpu = "0"
        else:
            defaultcpu = str(restcpu)
        if restmemory >= int(default['memory']):
            defaultmemory = default['memory']
        elif restmemory <= 0:
            defaultmemory = "0"
        else:
            defaultmemory = str(restmemory)
        if restdisk >= int(default['disk']):
            defaultdisk = default['disk']
        elif restdisk <= 0:
            defaultdisk = "0"
        else:
            defaultdisk = str(restdisk)
        defaultsetting = {
            'cpu': defaultcpu,
            'memory': defaultmemory,
            'disk': defaultdisk
        }
        if (result):
            return self.render(self.template_path, user = session['username'], images = images, quota = quota, usage = usage, defaultsetting = defaultsetting)
        else:
            self.error()


class createClusterView(normalView):
    """Create a cluster from an image identified as <name>_<owner>_<type>."""
    template_path = "dashboard.html"
    error_path = "error.html"

    @classmethod
    def post(self):
        # self.image is "<imagename>_<owner>_<type>"; split on the last
        # two underscores (image names themselves may contain '_').
        index1 = self.image.rindex("_")
        index2 = self.image[:index1].rindex("_")
        checkname(self.clustername)
        data = {
            "clustername": self.clustername,
            'imagename': self.image[:index2],
            'imageowner': self.image[index2+1:index1],
            'imagetype': self.image[index1+1:]
        }
        result = dockletRequest.post("/cluster/create/", dict(data, **(request.form)))
        if(result.get('success', None) == "true"):
            return redirect("/dashboard/")
            #return self.render(self.template_path, user = session['username'])
        else:
            return self.render(self.error_path, message = result.get('message'))


class descriptionImageView(normalView):
    """Show the stored description of an image."""
    template_path = "image_description.html"

    @classmethod
    def get(self):
        index1 = self.image.rindex("_")
        index2 = self.image[:index1].rindex("_")
        data = {
            "imagename": self.image[:index2],
            "imageowner": self.image[index2+1:index1],
            "imagetype": self.image[index1+1:]
        }
        result = dockletRequest.post("/image/description/", data)
        if(result):
            description = result.get("message")
            return self.render(self.template_path, description = description)
        else:
            self.error()


class scaleoutView(normalView):
    """Add a container (from an image) to an existing cluster."""
    error_path = "error.html"

    @classmethod
    def post(self):
        index1 = self.image.rindex("_")
        index2 = self.image[:index1].rindex("_")
        data = {
            "clustername": self.clustername,
            'imagename': self.image[:index2],
            'imageowner': self.image[index2+1:index1],
            'imagetype': self.image[index1+1:]
        }
        result = dockletRequest.post("/cluster/scaleout/", dict(data, **(request.form)))
        if(result.get('success', None) == "true"):
            return redirect("/config/")
        else:
            return self.render(self.error_path, message = result.get('message'))


class scaleinView(normalView):
    """Remove one container from a cluster."""
    @classmethod
    def get(self):
        data = {
            "clustername": self.clustername,
            "containername":self.containername
        }
        result = dockletRequest.post("/cluster/scalein/", data)
        if(result):
            return redirect("/config/")
        else:
            self.error()


class listClusterView(normalView):
    """List all clusters of the logged-in user."""
    template_path = "listCluster.html"

    @classmethod
    def get(self):
        result = dockletRequest.post("/cluster/list/")
        clusters = result.get("clusters")
        if(result):
            return self.render(self.template_path, user = session['username'], clusters = clusters)
        else:
            self.error()


class startClusterView(normalView):
    """Start a stopped cluster, then return to the dashboard."""
    template_path = "dashboard.html"
    error_path = "error.html"

    @classmethod
    def get(self):
        data = {
            "clustername": self.clustername
        }
        result = dockletRequest.post("/cluster/start/", data)
        if(result.get('success', None) == "true"):
            return redirect("/dashboard/")
            #return self.render(self.template_path, user = session['username'])
        else:
            return self.render(self.error_path, message = result.get('message'))


class stopClusterView(normalView):
    """Stop a running cluster."""
    template_path = "dashboard.html"

    @classmethod
    def get(self):
        data = {
            "clustername": self.clustername
        }
        result = dockletRequest.post("/cluster/stop/", data)
        if(result):
            return redirect("/dashboard/")
        else:
            return self.error()


class flushClusterView(normalView):
    """Flush a cluster's state from one of its containers."""
    success_path = "opsuccess.html"
    failed_path = "opfailed.html"

    @classmethod
    def get(self):
        data = {
            "clustername": self.clustername,
            "from_lxc": self.containername
        }
        result = dockletRequest.post("/cluster/flush/", data)
        if(result):
            if result.get('success') == "true":
                return self.render(self.success_path, user = session['username'])
            else:
                return self.render(self.failed_path, user = session['username'])
        else:
            self.error()


class deleteClusterView(normalView):
    """Delete a cluster entirely."""
    template_path = "dashboard.html"

    @classmethod
    def get(self):
        data = {
            "clustername": self.clustername
        }
        result = dockletRequest.post("/cluster/delete/", data)
        if(result):
            return redirect("/dashboard/")
        else:
            return self.error()


class detailClusterView(normalView):
    """Show the containers and status of one cluster."""
    template_path = "listcontainer.html"

    @classmethod
    def get(self):
        data = {
            "clustername": self.clustername
        }
        result = dockletRequest.post("/cluster/info/", data)
        if(result):
            message = result.get('message')
            containers = message['containers']
            status = message['status']
            return self.render(self.template_path, containers = containers, user = session['username'], clustername = self.clustername, status = status)
        else:
            self.error()


class saveImageView(normalView):
    """Save a container as an image; re-prompts when the name exists."""
    template_path = "saveconfirm.html"
    success_path = "opsuccess.html"
    error_path = "error.html"

    @classmethod
    def post(self):
        data = {
            "clustername": self.clustername,
            "image": self.imagename,
            "containername": self.containername,
            "description": self.description,
            "isforce": self.isforce
        }
        result = dockletRequest.post("/cluster/save/", data)
        if(result):
            if result.get('success') == 'true':
                #return self.render(self.success_path, user = session['username'])
                return redirect("/config/")
                #res = detailClusterView()
                #res.clustername = self.clustername
                #return res.as_view()
            else:
                # "exists" means the image name is taken: show the
                # confirmation page so the user can overwrite (isforce).
                if result.get('reason') == "exists":
                    return self.render(self.template_path, containername = self.containername, clustername = self.clustername, image = self.imagename, user = session['username'], description = self.description)
                else:
                    return self.render(self.error_path, message = result.get('message'))
        else:
            self.error()


class shareImageView(normalView):
    """Make an image visible to other users."""
    template_path = "dashboard.html"

    @classmethod
    def get(self):
        data = {
            "image": self.image
        }
        result = dockletRequest.post("/image/share/", data)
        if(result):
            return redirect("/config/")
        else:
            self.error()


class unshareImageView(normalView):
    """Revoke sharing of an image."""
    template_path = "dashboard.html"

    @classmethod
    def get(self):
        data = {
            "image": self.image
        }
        result = dockletRequest.post("/image/unshare/", data)
        if(result):
            return redirect("/config/")
        else:
            self.error()


class deleteImageView(normalView):
    """Delete an image."""
    template_path = "dashboard.html"

    @classmethod
    def get(self):
        data = {
            "image": self.image
        }
        result = dockletRequest.post("/image/delete/", data)
        if(result):
            return redirect("/config/")
        else:
            self.error()


class addproxyView(normalView):
    """Register a reverse-proxy entry (ip:port) for a cluster."""
    @classmethod
    def post(self):
        data = {
            "clustername": self.clustername,
            "ip": self.ip,
            "port": self.port
        }
        result = dockletRequest.post("/addproxy/", data)
        if(result):
            return redirect("/config/")
        else:
            self.error()


class deleteproxyView(normalView):
    """Remove the reverse-proxy entry of a cluster."""
    @classmethod
    def get(self):
        data = {
            "clustername":self.clustername
        }
        result = dockletRequest.post("/deleteproxy/", data)
        if(result):
            return redirect("/config/")
        else:
            self.error()

    @classmethod
    def post(self):
        return self.get()


class configView(normalView):
    """Workspace configuration page: images, clusters, and quota info."""
    @classmethod
    def get(self):
        images = dockletRequest.post('/image/list/').get('images')
        clusters = dockletRequest.post("/cluster/list/").get("clusters")
        clusters_info = {}
        data={}
        # Fetch per-cluster details one request at a time.
        for cluster in clusters:
            data["clustername"] = cluster
            result = dockletRequest.post("/cluster/info/",data).get("message")
            clusters_info[cluster] = result
        result = dockletRequest.post("/user/usageQuery/")
        quota = result.get("quota")
        usage = result.get("usage")
        default = result.get("default")
        # Same clamped-default computation as addClusterView.get above.
        restcpu = int(quota['cpu']) - int(usage['cpu'])
        restmemory = int(quota['memory']) - int(usage['memory'])
        restdisk = int(quota['disk']) - int(usage['disk'])
        if restcpu >= int(default['cpu']):
            defaultcpu = default['cpu']
        elif restcpu <= 0:
            defaultcpu = "0"
        else:
            defaultcpu = str(restcpu)
        if restmemory >= int(default['memory']):
            defaultmemory = default['memory']
        elif restmemory <= 0:
            defaultmemory = "0"
        else:
            defaultmemory = str(restmemory)
        if restdisk >= int(default['disk']):
            defaultdisk = default['disk']
        elif restdisk <= 0:
            defaultdisk = "0"
        else:
            defaultdisk = str(restdisk)
        defaultsetting = {
            'cpu': defaultcpu,
            'memory': defaultmemory,
            'disk': defaultdisk
        }
        return self.render("config.html", images = images, clusters = clusters_info, mysession=dict(session), quota = quota, usage = usage, defaultsetting = defaultsetting)

    @classmethod
    def post(self):
        return self.get()
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Utility module designed to access the DOM of various web pages and perform
a number of different functional tests. Any combination of these methods
can be used to test a variety of functionality within the application.

This is a constantly growing python file. Everything should be abstracted
out enough so no values are hard coded.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""

# Selenium dependencies
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# FIX: NoAlertPresentException was referenced in close_popup() but never
# imported, so the except clause itself raised NameError.
from selenium.common.exceptions import NoAlertPresentException

import time
import platform
import os

# Configuration options for Firefox
caps = DesiredCapabilities.FIREFOX
caps["marionette"] = True
caps["binary"] = "C:\\Program Files (x86)\\Mozilla Firefox\\firefox"

# Configuration options for Chrome
my_chrome_options = Options()
my_chrome_options.add_argument("--ignore-gpu-blacklist")

# Constants
# Localhost being used here on the VM. Can code in actual IP address if desired.
url = "http://bigdata-node3.ama-inc.com:8000"
#url = "http://192.168.100.155"
browser_type = "chrome"

# Driver (module-level singleton set by open_browser)
driver = None


def open_browser():
    """Open the configured browser, navigate to `url`, log in, and return
    the webdriver instance (also stored in the module-level `driver`)."""
    global driver
    global url
    global browser_type

    # For Firefox. Can't use yet due to lack of functionality.
    if browser_type == 'firefox':
        driver = webdriver.Firefox(capabilities=caps)

    # Checking for type of OS to pick the matching chromedriver binary.
    if browser_type == 'chrome':
        if "Linux" in platform.platform():
            driver = webdriver.Chrome('/home/localuser/Datacube/data_cube_ui/testsuite/drivers/chromedriver_linux', chrome_options=my_chrome_options)
        elif "Windows" in platform.platform():
            driver_location = os.getcwd().replace("selenium", "drivers") + "\\chromedriver_windows"
            driver = webdriver.Chrome(driver_location, chrome_options=my_chrome_options)

    driver.get(url)
    driver.maximize_window()
    log_in()
    return driver


def log_in():
    """Log in through the site's login form.

    NOTE(review): credentials are hard coded here, contradicting the module
    docstring's no-hard-coding goal -- consider environment variables.
    """
    driver.find_element_by_id("login-button").click()
    driver.find_element_by_id("id_username").send_keys("localuser")
    driver.find_element_by_id("id_password").send_keys("amadev12")
    time.sleep(2)
    driver.find_element_by_id("log-in-submit").click()


def get_cube(cube_name):
    """Hover through the map-tools menu and click the named cube entry."""
    action = webdriver.ActionChains(driver)
    elem = driver.find_element_by_id("map_tools")
    action.move_to_element(elem)
    time.sleep(2)
    action.move_to_element(driver.find_element_by_id(cube_name))
    time.sleep(2)
    action.perform()
    time.sleep(2)
    driver.find_element_by_id(cube_name).click()


def create_job_landsat(lat_min, lat_max, lon_min, lon_max, start_date, end_date, title, description, landsat_number, sleep_time, band_selection):
    """Fill out and submit a Landsat 7/8 analysis job.

    band_selection must be a list of strings naming the bands to check;
    landsat_number is "7" or "8" (used to build element ids).
    """
    driver.find_element_by_id("satellite_sel").click()
    time.sleep(1)
    if landsat_number == "7":
        driver.find_element_by_xpath("//*[contains(text(), 'Landsat 7')]").click()
    elif landsat_number == "8":
        driver.find_element_by_xpath("//*[contains(text(), 'Landsat 8')]").click()
    driver.find_element_by_id("satellite_sel").click()

    # Tick only the checkboxes belonging to this satellite AND requested.
    driver.find_element_by_id("LANDSAT_"+landsat_number+"_band_selection_ms").click()
    checkboxes = driver.find_elements_by_xpath("//input[@type='checkbox']")
    for checkbox in checkboxes:
        #if 'LANDSAT_'+landsat_number in checkbox.get_attribute('id'):
        if ('LANDSAT_'+landsat_number in checkbox.get_attribute('id')) and (checkbox.get_attribute('title') in band_selection):
            checkbox.click()
    driver.find_element_by_id("LANDSAT_"+landsat_number+"_band_selection_ms").click()
    time.sleep(2)

    # Set Lon,Lat min and max.
    driver.find_element_by_id("LANDSAT_"+landsat_number+"_latitude_min").send_keys(lat_min)
    driver.find_element_by_id("LANDSAT_"+landsat_number+"_latitude_max").send_keys(lat_max)
    driver.find_element_by_id("LANDSAT_"+landsat_number+"_longitude_min").send_keys(lon_min)
    driver.find_element_by_id("LANDSAT_"+landsat_number+"_longitude_max").send_keys(lon_max)

    # Set the date.
    driver.find_element_by_id("LANDSAT_"+landsat_number+"_time_start").send_keys(start_date)
    driver.find_element_by_id("LANDSAT_"+landsat_number+"_time_end").clear()
    driver.find_element_by_id("LANDSAT_"+landsat_number+"_time_end").send_keys(end_date)
    time.sleep(2)
    driver.find_element_by_xpath("//*[contains(text(), 'Done')]").click()
    time.sleep(2)

    # Add additional info.
    driver.find_element_by_id("additional-info-LANDSAT_"+landsat_number+"").click()
    time.sleep(2)
    driver.find_element_by_id("query-title").send_keys(title)
    driver.find_element_by_id("query-description").send_keys(description)
    time.sleep(2)
    driver.find_element_by_id("save-and-close").click()
    time.sleep(2)

    # Submit request
    driver.find_element_by_id("submit-request-LANDSAT_"+landsat_number+"").click()
    driver.find_element_by_id("LANDSAT_"+landsat_number+"_latitude_min").click()
    time.sleep(sleep_time)


def cancel_job(job_id):
    """Cancel the currently running job given the id of a cancel button."""
    driver.find_element_by_id(job_id).click()
    time.sleep(2)


def close_popup():
    """Accept a browser alert if one is present; otherwise report none."""
    try:
        driver.switch_to_alert().accept()
        time.sleep(2)
    except NoAlertPresentException:
        print("no alert")


def show_results():
    """Show the results of the first item in the list of executed tasks."""
    driver.find_element_by_id("past_0").click()
    time.sleep(2)
    driver.find_element_by_id("load0").click()
    time.sleep(2)
    driver.find_element_by_id("ui-id-2").click()
    time.sleep(2)
    driver.find_element_by_xpath("//div[@id='task_list']/h3").click()
    time.sleep(10)


def toggle_no_data():
    """Toggle highlighting of the no-data pixels."""
    driver.find_element_by_xpath("//*[@name='show_nodata']").click()
    time.sleep(3)


def toggle_show_hide():
    """Toggle showing or hiding the data layer."""
    driver.find_element_by_xpath("//*[contains(text(), 'Show/Hide')]").click()
    time.sleep(3)


def load_single_scene(wait_time):
    """Load the first scene. A wait is necessary because the scene may be
    rather large."""
    driver.find_element_by_xpath("//*[contains(text(), 'Load this scene')]").click()
    time.sleep(wait_time)


def go_to_task_manager():
    """Navigate to the task manager."""
    driver.find_element_by_id("logout-button")
    driver.find_element_by_id("task-manager-nav").click()
    time.sleep(2)


def get_details_page():
    """Click on the details for a single query."""
    driver.find_element_by_class_name("btn").click()
    time.sleep(2)


def view_image():
    """Open the image in its own tab, then close that tab."""
    driver.find_element_by_xpath("//*[contains(text(), 'View image')]").click()
    time.sleep(3)
    driver.find_element_by_tag_name("body").send_keys(Keys.CONTROL + "w")
    time.sleep(2)


def download_tif():
    """Download the TIF file."""
    driver.find_element_by_xpath("//*[contains(text(), 'Download tif')]").click()
    time.sleep(2)


def logout():
    """Log out of the application."""
    driver.find_element_by_id("logout-button").click()
    time.sleep(5)


def type_keys_in_search(list_of_keys):
    """Type the given keys into the task-manager search bar, then clear it."""
    driver.find_element_by_xpath("//div[@id='query-list-table-full_filter']/label/input").send_keys(list_of_keys)
    time.sleep(3)
    driver.find_element_by_xpath("//div[@id='query-list-table-full_filter']/label/input").clear()
    time.sleep(1)


def navigate_pagination(pagination_fp):
    """Click 'next' or 'previous' in the task-list pagination controls."""
    if pagination_fp == 'next':
        driver.find_element_by_xpath("//div[@id='query-list-table-full_paginate']/a[@id='query-list-table-full_next']").click()
    elif pagination_fp == 'previous':
        driver.find_element_by_xpath("//div[@id='query-list-table-full_paginate']/a[@id='query-list-table-full_previous']").click()
    time.sleep(3)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import click
import logging
import os
import subprocess
import operator
from datetime import datetime

import pandas as pd
from pandas.api.types import is_string_dtype, is_numeric_dtype
from ray.tune.result import (TRAINING_ITERATION, MEAN_ACCURACY, MEAN_LOSS,
                             TIME_TOTAL_S, TRIAL_ID, CONFIG_PREFIX)
from ray.tune.analysis import Analysis
from ray.tune import TuneError

# tabulate is optional; _check_tabulate() enforces its presence lazily.
try:
    from tabulate import tabulate
except ImportError:
    tabulate = None

logger = logging.getLogger(__name__)

EDITOR = os.getenv("EDITOR", "vim")

TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S (%A)"

DEFAULT_EXPERIMENT_INFO_KEYS = ("trainable_name", "experiment_tag",
                                TRAINING_ITERATION, TIME_TOTAL_S,
                                MEAN_ACCURACY, MEAN_LOSS, TRIAL_ID)

DEFAULT_PROJECT_INFO_KEYS = (
    "name",
    "total_trials",
    "last_updated",
)

# Query the terminal size via stty; fall back to 100x100 when stty exits
# non-zero. NOTE(review): a missing stty binary (e.g. Windows) would raise
# FileNotFoundError, which this except does not cover -- TODO confirm.
try:
    TERM_HEIGHT, TERM_WIDTH = subprocess.check_output(["stty", "size"]).split()
    TERM_HEIGHT, TERM_WIDTH = int(TERM_HEIGHT), int(TERM_WIDTH)
except subprocess.CalledProcessError:
    TERM_HEIGHT, TERM_WIDTH = 100, 100

# Maps the textual operator of a "<column> <operator> <value>" filter
# expression to its callable.
OPERATORS = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _check_tabulate():
    """Checks whether tabulate is installed."""
    if tabulate is None:
        raise ImportError(
            "Tabulate not installed. Please run `pip install tabulate`.")


def print_format_output(dataframe):
    """Prints output of given dataframe to fit into terminal.

    Returns:
        table (pd.DataFrame): Final outputted dataframe.
        dropped_cols (list): Columns dropped due to terminal size.
        empty_cols (list): Empty columns (dropped on default).
    """
    print_df = pd.DataFrame()
    dropped_cols = []
    empty_cols = []
    # column display priority is based on the info_keys passed in
    for i, col in enumerate(dataframe):
        if dataframe[col].isnull().all():
            # Don't add col to print_df if is fully empty
            empty_cols += [col]
            continue

        print_df[col] = dataframe[col]
        # Width check: the first line of the rendered table ends at the
        # first newline, so its index equals the rendered table width.
        test_table = tabulate(print_df, headers="keys", tablefmt="psql")
        if str(test_table).index("\n") > TERM_WIDTH:
            # Drop all columns beyond terminal width
            print_df.drop(col, axis=1, inplace=True)
            dropped_cols += list(dataframe.columns)[i:]
            break

    table = tabulate(
        print_df, headers="keys", tablefmt="psql", showindex="never")

    print(table)
    if dropped_cols:
        click.secho("Dropped columns: {}".format(dropped_cols), fg="yellow")
        click.secho("Please increase your terminal size "
                    "to view remaining columns.")
    if empty_cols:
        click.secho("Empty columns: {}".format(empty_cols), fg="yellow")

    return table, dropped_cols, empty_cols


def list_trials(experiment_path,
                sort=None,
                output=None,
                filter_op=None,
                info_keys=None,
                limit=None,
                desc=False):
    """Lists trials in the directory subtree starting at the given path.

    Args:
        experiment_path (str): Directory where trials are located.
            Like Experiment.local_dir/Experiment.name/experiment*.json.
        sort (list): Keys to sort by.
        output (str): Name of file where output is saved.
        filter_op (str): Filter operation in the format
            "<column> <operator> <value>".
        info_keys (list): Keys that are displayed.
        limit (int): Number of rows to display.
        desc (bool): Sort ascending vs. descending.
    """
    _check_tabulate()

    try:
        checkpoints_df = Analysis(experiment_path).dataframe()
    except TuneError:
        raise click.ClickException("No trial data found!")

    def key_filter(k):
        # Default visible columns: the well-known info keys plus any
        # hyperparameter columns (prefixed with CONFIG_PREFIX).
        return k in DEFAULT_EXPERIMENT_INFO_KEYS or k.startswith(CONFIG_PREFIX)

    col_keys = [k for k in checkpoints_df.columns if key_filter(k)]
    if info_keys:
        for k in info_keys:
            if k not in checkpoints_df.columns:
                raise click.ClickException("Provided key invalid: {}. "
                                           "Available keys: {}.".format(
                                               k, checkpoints_df.columns))
        col_keys = [k for k in checkpoints_df.columns if k in info_keys]
    if not col_keys:
        raise click.ClickException("No columns to output.")
    checkpoints_df = checkpoints_df[col_keys]

    if "last_update_time" in checkpoints_df:
        # Render epoch timestamps as human-readable strings; inf values
        # are treated as null and dropped before formatting.
        with pd.option_context("mode.use_inf_as_null", True):
            datetime_series = checkpoints_df["last_update_time"].dropna()

        datetime_series = datetime_series.apply(
            lambda t: datetime.fromtimestamp(t).strftime(TIMESTAMP_FORMAT))
        checkpoints_df["last_update_time"] = datetime_series

    if "logdir" in checkpoints_df:
        # logdir often too long to view in table, so drop experiment_path
        # NOTE(review): pandas Series.str.replace may interpret the path as
        # a regex depending on pandas version -- verify for paths with
        # special characters.
        checkpoints_df["logdir"] = checkpoints_df["logdir"].str.replace(
            experiment_path, "")

    if filter_op:
        # Parse "<column> <operator> <value>" and coerce the value to the
        # column's dtype before applying the comparison.
        col, op, val = filter_op.split(" ")
        col_type = checkpoints_df[col].dtype
        if is_numeric_dtype(col_type):
            val = float(val)
        elif is_string_dtype(col_type):
            val = str(val)
        # TODO(Andrew): add support for datetime and boolean
        else:
            raise click.ClickException("Unsupported dtype for {}: {}".format(
                val, col_type))
        op = OPERATORS[op]
        filtered_index = op(checkpoints_df[col], val)
        checkpoints_df = checkpoints_df[filtered_index]

    if sort:
        for key in sort:
            if key not in checkpoints_df:
                raise click.ClickException("{} not in: {}".format(
                    key, list(checkpoints_df)))
        ascending = not desc
        checkpoints_df = checkpoints_df.sort_values(
            by=sort, ascending=ascending)

    if limit:
        checkpoints_df = checkpoints_df[:limit]

    print_format_output(checkpoints_df)

    if output:
        # Persist the (filtered/sorted/limited) table; format chosen by
        # file extension.
        file_extension = os.path.splitext(output)[1].lower()
        if file_extension in (".p", ".pkl", ".pickle"):
            checkpoints_df.to_pickle(output)
        elif file_extension == ".csv":
            checkpoints_df.to_csv(output, index=False)
        else:
            raise click.ClickException(
                "Unsupported filetype: {}".format(output))
        click.secho("Output saved at {}".format(output), fg="green")


def list_experiments(project_path,
                     sort=None,
                     output=None,
                     filter_op=None,
                     info_keys=None,
                     limit=None,
                     desc=False):
    """Lists experiments in the directory subtree.

    Args:
        project_path (str): Directory where experiments are located.
            Corresponds to Experiment.local_dir.
        sort (list): Keys to sort by.
        output (str): Name of file where output is saved.
        filter_op (str): Filter operation in the format
            "<column> <operator> <value>".
        info_keys (list): Keys that are displayed.
        limit (int): Number of rows to display.
        desc (bool): Sort ascending vs. descending.
    """
    _check_tabulate()
    base, experiment_folders, _ = next(os.walk(project_path))

    experiment_data_collection = []
    for experiment_dir in experiment_folders:
        # A trial is counted by the presence of its result.json file
        # anywhere under the experiment directory.
        num_trials = sum(
            "result.json" in files
            for _, _, files in os.walk(os.path.join(base, experiment_dir)))
        experiment_data = {"name": experiment_dir, "total_trials": num_trials}
        experiment_data_collection.append(experiment_data)

    if not experiment_data_collection:
        raise click.ClickException("No experiments found!")

    info_df = pd.DataFrame(experiment_data_collection)
    if not info_keys:
        info_keys = DEFAULT_PROJECT_INFO_KEYS
    col_keys = [k for k in list(info_keys) if k in info_df]
    if not col_keys:
        raise click.ClickException(
            "None of keys {} in experiment data!".format(info_keys))
    info_df = info_df[col_keys]

    if filter_op:
        # Same "<column> <operator> <value>" filtering as list_trials.
        col, op, val = filter_op.split(" ")
        col_type = info_df[col].dtype
        if is_numeric_dtype(col_type):
            val = float(val)
        elif is_string_dtype(col_type):
            val = str(val)
        # TODO(Andrew): add support for datetime and boolean
        else:
            raise click.ClickException("Unsupported dtype for {}: {}".format(
                val, col_type))
        op = OPERATORS[op]
        filtered_index = op(info_df[col], val)
        info_df = info_df[filtered_index]

    if sort:
        for key in sort:
            if key not in info_df:
                raise click.ClickException("{} not in: {}".format(
                    key, list(info_df)))
        ascending = not desc
        info_df = info_df.sort_values(by=sort, ascending=ascending)

    if limit:
        info_df = info_df[:limit]

    print_format_output(info_df)

    if output:
        file_extension = os.path.splitext(output)[1].lower()
        if file_extension in (".p", ".pkl", ".pickle"):
            info_df.to_pickle(output)
        elif file_extension == ".csv":
            info_df.to_csv(output, index=False)
        else:
            raise click.ClickException(
                "Unsupported filetype: {}".format(output))
        click.secho("Output saved at {}".format(output), fg="green")


def add_note(path, filename="note.txt"):
    """Opens a txt file at the given path where user can add and save notes.

    Args:
        path (str): Directory where note will be saved.
        filename (str): Name of note. Defaults to "note.txt"
    """
    path = os.path.expanduser(path)
    assert os.path.isdir(path), "{} is not a valid directory.".format(path)

    filepath = os.path.join(path, filename)
    # The note is considered "updated" (vs "created") if the file already
    # existed before the editor was launched.
    exists = os.path.isfile(filepath)

    try:
        subprocess.call([EDITOR, filepath])
    except Exception as exc:
        click.secho("Editing note failed: {}".format(str(exc)), fg="red")

    if exists:
        print("Note updated at:", filepath)
    else:
        print("Note created at:", filepath)
# -*- coding: utf-8 -*-
#
# This file is part of Flask-Collect.
# Copyright (C) 2012, 2013, 2014 Kirill Klenov.
# Copyright (C) 2014 CERN.
#
# Flask-Collect is free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.

import time
from os import path as op, remove
import subprocess

from flask import Flask, Blueprint
from functools import partial
from shutil import rmtree
from tempfile import mkdtemp
from unittest import TestCase

from flask_collect import Collect


def filter_(order, items):
    """Sort blueprints by their position in *order* (unknown names first)."""
    def _rank(blueprint):
        return order.index(blueprint.name) if blueprint.name in order else -1
    return sorted(items, key=_rank)


class BaseTest(TestCase):
    """Integration tests for the Collect extension and its storages."""

    def _read(self, *parts):
        # Helper: return the text content of a (collected) file.
        with open(op.join(*parts), 'r') as fh:
            return fh.read()

    def test_collect(self):
        app = Flask(__name__)
        app.register_blueprint(Blueprint(
            'test1', __name__, static_folder='static1',
            static_url_path='/static/test1'))
        app.register_blueprint(
            Blueprint('test2', __name__, static_folder='static2'))

        root = mkdtemp()
        app.config['COLLECT_STATIC_ROOT'] = root
        Collect(app).collect(verbose=True)

        # Files from both blueprints end up under the static root.
        for parts in (('test1', 'test.css'), ('js', 'test.js'), ('app.css',)):
            self.assertTrue(op.exists(op.join(root, *parts)))

        # The "test" storage reports the files instead of writing them.
        app.config['COLLECT_STORAGE'] = 'flask.ext.collect.storage.test'
        collected = Collect(app).collect(verbose=True)
        self.assertEqual(len(collected), 3)

        rmtree(root)

    def test_filter(self):
        """Blueprints are visited in the order given by COLLECT_FILTER."""
        app = Flask(__name__)
        app.register_blueprint(
            Blueprint('test1', __name__, static_folder='static1'))
        app.register_blueprint(
            Blueprint('test3', __name__, static_folder='static3'))

        root = mkdtemp()
        app.config['COLLECT_STATIC_ROOT'] = root
        app.config['COLLECT_FILTER'] = partial(filter_, ['test3', 'test1'])
        app.config['COLLECT_STORAGE'] = 'flask.ext.collect.storage.test'
        collected = list(Collect(app).collect(verbose=True))
        self.assertEqual(len(collected), 2)
        self.assertTrue('static3' in collected[1][1])

        # Reversing the order changes which blueprint comes second.
        app.config['COLLECT_FILTER'] = partial(filter_, ['test1', 'test3'])
        collected = list(Collect(app).collect(verbose=True))
        self.assertTrue('static1' in collected[1][1])

        rmtree(root)

    def test_file_storage(self):
        """The file storage copies blueprint static files to the root."""
        app = Flask(__name__)
        app.register_blueprint(
            Blueprint('test1', __name__, static_folder='static1'))
        app.register_blueprint(
            Blueprint('test3', __name__, static_folder='static3'))

        root = mkdtemp()
        app.config['COLLECT_STATIC_ROOT'] = root
        app.config['COLLECT_FILTER'] = partial(filter_, ['test3', 'test1'])
        app.config['COLLECT_STORAGE'] = 'flask.ext.collect.storage.file'
        Collect(app).collect()

        self.assertTrue(
            'body { color: red; }' in self._read(root, 'test.css'))
        rmtree(root)

    def test_file_storage_update(self):
        """Re-collecting with the file storage keeps the existing target."""
        # First materialize "static3" into a scratch directory so its
        # files have a controllable mtime.
        source3 = mkdtemp()
        dummy_app = Flask(__name__)
        dummy_app.register_blueprint(
            Blueprint('dummy', __name__, static_folder='static3'))
        dummy_app.config['COLLECT_STATIC_ROOT'] = source3
        dummy_app.config['COLLECT_STORAGE'] = 'flask.ext.collect.storage.file'
        Collect(dummy_app).collect()

        app = Flask(__name__)
        app.register_blueprint(
            Blueprint('test1', __name__, static_folder='static1'))
        app.register_blueprint(
            Blueprint('test3', __name__, static_folder=source3))

        root = mkdtemp()
        app.config['COLLECT_STATIC_ROOT'] = root
        app.config['COLLECT_FILTER'] = partial(filter_, ['test1', 'test3'])
        app.config['COLLECT_STORAGE'] = 'flask.ext.collect.storage.file'
        collect = Collect(app)
        collect.collect()
        self.assertTrue(
            'body { color: blue; }' in self._read(root, 'test.css'))

        # Bump the source file's mtime ...
        time.sleep(1)
        subprocess.call(['touch', op.join(source3, 'test.css')])
        # ... and re-collect: the target still holds test1's content.
        collect.collect()
        self.assertTrue(
            'body { color: blue; }' in self._read(root, 'test.css'))

        rmtree(source3)
        rmtree(root)

    def test_link_storage(self):
        """The link storage symlinks files instead of copying them."""
        source3 = mkdtemp()
        dummy_app = Flask(__name__)
        dummy_app.register_blueprint(
            Blueprint('dummy', __name__, static_folder='static3'))
        dummy_app.config['COLLECT_STATIC_ROOT'] = source3
        dummy_app.config['COLLECT_STORAGE'] = 'flask.ext.collect.storage.file'
        Collect(dummy_app).collect()
        self.assertTrue(
            'body { color: red; }' in self._read(source3, 'test.css'))

        app = Flask(__name__)
        for name, folder in (('test1', 'static1'), ('test2', 'static2'),
                             ('test3', source3)):
            app.register_blueprint(
                Blueprint(name, __name__, static_folder=folder))

        root = mkdtemp()
        app.config['COLLECT_STATIC_ROOT'] = root
        app.config['COLLECT_FILTER'] = partial(filter_, ['test3', 'test1'])
        app.config['COLLECT_STORAGE'] = 'flask.ext.collect.storage.link'
        collect = Collect(app)
        collect.collect()
        self.assertTrue(
            'body { color: red; }' in self._read(root, 'test.css'))

        # Writing through the source is visible through the link.
        with open(op.join(source3, 'test.css'), 'w') as fh:
            fh.write('body { color: green; }')
        self.assertTrue(
            'body { color: green; }' in self._read(root, 'test.css'))

        # Drop the source file and re-collect: test1's file takes over.
        remove(op.join(source3, 'test.css'))
        collect.collect()
        self.assertTrue(
            'body { color: blue; }' in self._read(root, 'test.css'))

        rmtree(source3)
        rmtree(root)

    def test_link_storage_update(self):
        """Re-collecting with the link storage re-points existing links."""
        app = Flask(__name__)
        app.register_blueprint(
            Blueprint('test1', __name__, static_folder='static1'))

        root = mkdtemp()
        app.config['COLLECT_STATIC_ROOT'] = root
        app.config['COLLECT_FILTER'] = partial(filter_, ['test1'])
        app.config['COLLECT_STORAGE'] = 'flask.ext.collect.storage.link'
        Collect(app).collect()
        # The fresh link points at test1's stylesheet.
        self.assertTrue(
            'body { color: blue; }' in self._read(root, 'test.css'))

        app.register_blueprint(
            Blueprint('test3', __name__, static_folder='static3'))
        app.config['COLLECT_FILTER'] = partial(filter_, ['test3', 'test1'])
        Collect(app).collect()
        # Now test3 takes precedence and the link is re-pointed.
        self.assertTrue(
            'body { color: red; }' in self._read(root, 'test.css'))

        rmtree(root)
""" A small templating language This implements a small templating language. This language implements if/elif/else, for/continue/break, expressions, and blocks of Python code. The syntax is:: {{any expression (function calls etc)}} {{any expression | filter}} {{for x in y}}...{{endfor}} {{if x}}x{{elif y}}y{{else}}z{{endif}} {{py:x=1}} {{py: def foo(bar): return 'baz' }} {{default var = default_value}} {{# comment}} You use this with the ``Template`` class or the ``sub`` shortcut. The ``Template`` class takes the template string and the name of the template (for errors) and a default namespace. Then (like ``string.Template``) you can call the ``tmpl.substitute(**kw)`` method to make a substitution (or ``tmpl.substitute(a_dict)``). ``sub(content, **kw)`` substitutes the template immediately. You can use ``__name='tmpl.html'`` to set the name of the template. If there are syntax errors ``TemplateError`` will be raised. This copy of tempita was taken from https://github.com/gjhiggins/tempita with a few changes to remove the six dependency. 
""" from __future__ import absolute_import, division, print_function import re import sys try: from urllib.parse import quote as url_quote from io import StringIO from html import escape as html_escape except ImportError: from urllib import quote as url_quote from cStringIO import StringIO from cgi import escape as html_escape import os import tokenize from ._looper import looper from .compat3 import ( PY3, bytes, basestring_, next, is_unicode, coerce_text, iteritems) __all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate', 'sub_html', 'html', 'bunch'] in_re = re.compile(r'\s+in\s+') var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I) class TemplateError(Exception): """Exception raised while parsing a template """ def __init__(self, message, position, name=None): Exception.__init__(self, message) self.position = position self.name = name def __str__(self): msg = ' '.join(self.args) if self.position: msg = '%s at line %s column %s' % ( msg, self.position[0], self.position[1]) if self.name: msg += ' in %s' % self.name return msg class _TemplateContinue(Exception): pass class _TemplateBreak(Exception): pass def get_file_template(name, from_template): path = os.path.join(os.path.dirname(from_template.name), name) return from_template.__class__.from_filename( path, namespace=from_template.namespace, get_template=from_template.get_template) class Template(object): default_namespace = { 'start_braces': '{{', 'end_braces': '}}', 'looper': looper, } default_encoding = 'utf8' default_inherit = None def __init__(self, content, name=None, namespace=None, stacklevel=None, get_template=None, default_inherit=None, line_offset=0, delimeters=None): self.content = content # set delimeters if delimeters is None: delimeters = (self.default_namespace['start_braces'], self.default_namespace['end_braces']) else: assert len(delimeters) == 2 and all( [isinstance(delimeter, basestring_) for delimeter in delimeters]) self.default_namespace = self.__class__.default_namespace.copy() 
self.default_namespace['start_braces'] = delimeters[0] self.default_namespace['end_braces'] = delimeters[1] self.delimeters = delimeters self._unicode = is_unicode(content) if name is None and stacklevel is not None: try: caller = sys._getframe(stacklevel) except ValueError: pass else: globals = caller.f_globals lineno = caller.f_lineno if '__file__' in globals: name = globals['__file__'] if name.endswith('.pyc') or name.endswith('.pyo'): name = name[:-1] elif '__name__' in globals: name = globals['__name__'] else: name = '<string>' if lineno: name += ':%s' % lineno self.name = name self._parsed = parse( content, name=name, line_offset=line_offset, delimeters=self.delimeters) if namespace is None: namespace = {} self.namespace = namespace self.get_template = get_template if default_inherit is not None: self.default_inherit = default_inherit def from_filename(cls, filename, namespace=None, encoding=None, default_inherit=None, get_template=get_file_template): f = open(filename, 'rb') c = f.read() f.close() if encoding: c = c.decode(encoding) elif PY3: c = c.decode('latin-1') return cls(content=c, name=filename, namespace=namespace, default_inherit=default_inherit, get_template=get_template) from_filename = classmethod(from_filename) def __repr__(self): return '<%s %s name=%r>' % ( self.__class__.__name__, hex(id(self))[2:], self.name) def substitute(self, *args, **kw): if args: if kw: raise TypeError( "You can only give positional *or* keyword arguments") if len(args) > 1: raise TypeError( "You can only give one positional argument") if not hasattr(args[0], 'items'): raise TypeError( ("If you pass in a single argument, you must pass in a ", "dict-like object (with a .items() method); you gave %r") % (args[0],)) kw = args[0] ns = kw ns['__template_name__'] = self.name if self.namespace: ns.update(self.namespace) result, defs, inherit = self._interpret(ns) if not inherit: inherit = self.default_inherit if inherit: result = self._interpret_inherit(result, defs, inherit, 
ns) return result def _interpret(self, ns): # __traceback_hide__ = True parts = [] defs = {} self._interpret_codes(self._parsed, ns, out=parts, defs=defs) if '__inherit__' in defs: inherit = defs.pop('__inherit__') else: inherit = None return ''.join(parts), defs, inherit def _interpret_inherit(self, body, defs, inherit_template, ns): # __traceback_hide__ = True if not self.get_template: raise TemplateError( 'You cannot use inheritance without passing in get_template', position=None, name=self.name) templ = self.get_template(inherit_template, self) self_ = TemplateObject(self.name) for name, value in iteritems(defs): setattr(self_, name, value) self_.body = body ns = ns.copy() ns['self'] = self_ return templ.substitute(ns) def _interpret_codes(self, codes, ns, out, defs): # __traceback_hide__ = True for item in codes: if isinstance(item, basestring_): out.append(item) else: self._interpret_code(item, ns, out, defs) def _interpret_code(self, code, ns, out, defs): # __traceback_hide__ = True name, pos = code[0], code[1] if name == 'py': self._exec(code[2], ns, pos) elif name == 'continue': raise _TemplateContinue() elif name == 'break': raise _TemplateBreak() elif name == 'for': vars, expr, content = code[2], code[3], code[4] expr = self._eval(expr, ns, pos) self._interpret_for(vars, expr, content, ns, out, defs) elif name == 'cond': parts = code[2:] self._interpret_if(parts, ns, out, defs) elif name == 'expr': parts = code[2].split('|') base = self._eval(parts[0], ns, pos) for part in parts[1:]: func = self._eval(part, ns, pos) base = func(base) out.append(self._repr(base, pos)) elif name == 'default': var, expr = code[2], code[3] if var not in ns: result = self._eval(expr, ns, pos) ns[var] = result elif name == 'inherit': expr = code[2] value = self._eval(expr, ns, pos) defs['__inherit__'] = value elif name == 'def': name = code[2] signature = code[3] parts = code[4] ns[name] = defs[name] = TemplateDef( self, name, signature, body=parts, ns=ns, pos=pos) elif name 
== 'comment': return else: assert 0, "Unknown code: %r" % name def _interpret_for(self, vars, expr, content, ns, out, defs): # __traceback_hide__ = True for item in expr: if len(vars) == 1: ns[vars[0]] = item else: if len(vars) != len(item): raise ValueError( 'Need %i items to unpack (got %i items)' % (len(vars), len(item))) for name, value in zip(vars, item): ns[name] = value try: self._interpret_codes(content, ns, out, defs) except _TemplateContinue: continue except _TemplateBreak: break def _interpret_if(self, parts, ns, out, defs): # __traceback_hide__ = True # @@: if/else/else gets through for part in parts: assert not isinstance(part, basestring_) name, pos = part[0], part[1] if name == 'else': result = True else: result = self._eval(part[2], ns, pos) if result: self._interpret_codes(part[3], ns, out, defs) break def _eval(self, code, ns, pos): # __traceback_hide__ = True try: try: value = eval(code, self.default_namespace, ns) except SyntaxError as e: raise SyntaxError( 'invalid syntax in expression: %s' % code) return value except: exc_info = sys.exc_info() e = exc_info[1] if getattr(e, 'args', None): arg0 = e.args[0] else: arg0 = coerce_text(e) e.args = (self._add_line_info(arg0, pos),) if PY3: raise (e) else: raise (exc_info[1], e, exc_info[2]) def _exec(self, code, ns, pos): # __traceback_hide__ = True try: exec (code, self.default_namespace, ns) except: exc_info = sys.exc_info() e = exc_info[1] if e.args: e.args = (self._add_line_info(e.args[0], pos),) else: e.args = (self._add_line_info(None, pos),) if PY3: raise (e) else: raise (exc_info[1], e, exc_info[2]) def _repr(self, value, pos): # __traceback_hide__ = True try: if value is None: return '' if self._unicode: value = str(value) if not is_unicode(value): value = value.decode('utf-8') else: if not isinstance(value, basestring_): value = coerce_text(value) if (is_unicode(value) and self.default_encoding): value = value.encode(self.default_encoding) except: exc_info = sys.exc_info() e = exc_info[1] 
e.args = (self._add_line_info(e.args[0], pos),) if PY3: raise (e) else: raise (exc_info[1], e, exc_info[2]) else: if self._unicode and isinstance(value, bytes): if not self.default_encoding: raise UnicodeDecodeError( 'Cannot decode bytes value %r into unicode ' '(no default_encoding provided)' % value) try: value = value.decode(self.default_encoding) except UnicodeDecodeError as e: raise UnicodeDecodeError( e.encoding, e.object, e.start, e.end, e.reason + ' in string %r' % value) elif not self._unicode and is_unicode(value): if not self.default_encoding: raise UnicodeEncodeError( 'Cannot encode unicode value %r into bytes ' '(no default_encoding provided)' % value) value = value.encode(self.default_encoding) return value def _add_line_info(self, msg, pos): msg = "%s at line %s column %s" % ( msg, pos[0], pos[1]) if self.name: msg += " in file %s" % self.name return msg def sub(content, delimeters=None, **kw): name = kw.get('__name') tmpl = Template(content, name=name, delimeters=delimeters) return tmpl.substitute(kw) def paste_script_template_renderer(content, vars, filename=None): tmpl = Template(content, name=filename) return tmpl.substitute(vars) class bunch(dict): def __init__(self, **kw): for name, value in iteritems(kw): setattr(self, name, value) def __setattr__(self, name, value): self[name] = value def __getattr__(self, name): try: return self[name] except KeyError: raise AttributeError(name) def __getitem__(self, key): if 'default' in self: try: return dict.__getitem__(self, key) except KeyError: return dict.__getitem__(self, 'default') else: return dict.__getitem__(self, key) def __repr__(self): items = [ (k, v) for k, v in iteritems(self)] items.sort() return '<%s %s>' % ( self.__class__.__name__, ' '.join(['%s=%r' % (k, v) for k, v in items])) ############################################################ # HTML Templating ############################################################ class html(object): def __init__(self, value): self.value = value def 
__str__(self): return self.value def __html__(self): return self.value def __repr__(self): return '<%s %r>' % ( self.__class__.__name__, self.value) def html_quote(value, force=True): if not force and hasattr(value, '__html__'): return value.__html__() if value is None: return '' if not isinstance(value, basestring_): value = coerce_text(value) if sys.version >= "3" and isinstance(value, bytes): value = html_escape(value.decode('latin1'), 1) value = value.encode('latin1') else: value = html_escape(value, 1) if sys.version < "3": if is_unicode(value): value = value.encode('ascii', 'xmlcharrefreplace') return value def url(v): v = coerce_text(v) if is_unicode(v): v = v.encode('utf8') return url_quote(v) def attr(**kw): kw = list(iteritems(kw)) kw.sort() parts = [] for name, value in kw: if value is None: continue if name.endswith('_'): name = name[:-1] parts.append('%s="%s"' % (html_quote(name), html_quote(value))) return html(' '.join(parts)) class HTMLTemplate(Template): default_namespace = Template.default_namespace.copy() default_namespace.update(dict( html=html, attr=attr, url=url, html_quote=html_quote)) def _repr(self, value, pos): if hasattr(value, '__html__'): value = value.__html__() quote = False else: quote = True plain = Template._repr(self, value, pos) if quote: return html_quote(plain) else: return plain def sub_html(content, **kw): name = kw.get('__name') tmpl = HTMLTemplate(content, name=name) return tmpl.substitute(kw) class TemplateDef(object): def __init__(self, template, func_name, func_signature, body, ns, pos, bound_self=None): self._template = template self._func_name = func_name self._func_signature = func_signature self._body = body self._ns = ns self._pos = pos self._bound_self = bound_self def __repr__(self): return '<tempita function %s(%s) at %s:%s>' % ( self._func_name, self._func_signature, self._template.name, self._pos) def __str__(self): return self() def __call__(self, *args, **kw): values = self._parse_signature(args, kw) ns = 
self._ns.copy() ns.update(values) if self._bound_self is not None: ns['self'] = self._bound_self out = [] subdefs = {} self._template._interpret_codes(self._body, ns, out, subdefs) return ''.join(out) def __get__(self, obj, type=None): if obj is None: return self return self.__class__( self._template, self._func_name, self._func_signature, self._body, self._ns, self._pos, bound_self=obj) def _parse_signature(self, args, kw): values = {} sig_args, var_args, var_kw, defaults = self._func_signature extra_kw = {} for name, value in iteritems(kw): if not var_kw and name not in sig_args: raise TypeError( 'Unexpected argument %s' % name) if name in sig_args: values[sig_args] = value else: extra_kw[name] = value args = list(args) sig_args = list(sig_args) while args: while sig_args and sig_args[0] in values: sig_args.pop(0) if sig_args: name = sig_args.pop(0) values[name] = args.pop(0) elif var_args: values[var_args] = tuple(args) break else: raise TypeError( 'Extra position arguments: %s' % ', '.join(repr(v) for v in args)) for name, value_expr in iteritems(defaults): if name not in values: values[name] = self._template._eval( value_expr, self._ns, self._pos) for name in sig_args: if name not in values: raise TypeError( 'Missing argument: %s' % name) if var_kw: values[var_kw] = extra_kw return values class TemplateObject(object): def __init__(self, name): self.__name = name self.get = TemplateObjectGetter(self) def __repr__(self): return '<%s %s>' % (self.__class__.__name__, self.__name) class TemplateObjectGetter(object): def __init__(self, template_obj): self.__template_obj = template_obj def __getattr__(self, attr): return getattr(self.__template_obj, attr, Empty) def __repr__(self): return '<%s around %r>' % ( self.__class__.__name__, self.__template_obj) class _Empty(object): def __call__(self, *args, **kw): return self def __str__(self): return '' def __repr__(self): return 'Empty' def __unicode__(self): return '' if PY3 else u'' def __iter__(self): return iter(()) 
def __bool__(self): return False if sys.version < "3": __nonzero__ = __bool__ Empty = _Empty() del _Empty ############################################################ # Lexing and Parsing ############################################################ def lex(s, name=None, trim_whitespace=True, line_offset=0, delimeters=None): if delimeters is None: delimeters = (Template.default_namespace['start_braces'], Template.default_namespace['end_braces']) in_expr = False chunks = [] last = 0 last_pos = (line_offset + 1, 1) token_re = re.compile(r'%s|%s' % (re.escape(delimeters[0]), re.escape(delimeters[1]))) for match in token_re.finditer(s): expr = match.group(0) pos = find_position(s, match.end(), last, last_pos) if expr == delimeters[0] and in_expr: raise TemplateError('%s inside expression' % delimeters[0], position=pos, name=name) elif expr == delimeters[1] and not in_expr: raise TemplateError('%s outside expression' % delimeters[1], position=pos, name=name) if expr == delimeters[0]: part = s[last:match.start()] if part: chunks.append(part) in_expr = True else: chunks.append((s[last:match.start()], last_pos)) in_expr = False last = match.end() last_pos = pos if in_expr: raise TemplateError('No %s to finish last expression' % delimeters[1], name=name, position=last_pos) part = s[last:] if part: chunks.append(part) if trim_whitespace: chunks = trim_lex(chunks) return chunks lex.__doc__ = """ Lex a string into chunks: >>> lex('hey') ['hey'] >>> lex('hey {{you}}') ['hey ', ('you', (1, 7))] >>> lex('hey {{') Traceback (most recent call last): ... tempita.TemplateError: No }} to finish last expression at line 1 column 7 >>> lex('hey }}') Traceback (most recent call last): ... tempita.TemplateError: }} outside expression at line 1 column 7 >>> lex('hey {{ {{') Traceback (most recent call last): ... 
tempita.TemplateError: {{ inside expression at line 1 column 10 """ if PY3 else """ Lex a string into chunks: >>> lex('hey') ['hey'] >>> lex('hey {{you}}') ['hey ', ('you', (1, 7))] >>> lex('hey {{') Traceback (most recent call last): ... TemplateError: No }} to finish last expression at line 1 column 7 >>> lex('hey }}') Traceback (most recent call last): ... TemplateError: }} outside expression at line 1 column 7 >>> lex('hey {{ {{') Traceback (most recent call last): ... TemplateError: {{ inside expression at line 1 column 10 """ statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)') single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break'] trail_whitespace_re = re.compile(r'\n\r?[\t ]*$') lead_whitespace_re = re.compile(r'^[\t ]*\n') def trim_lex(tokens): last_trim = None for i in range(len(tokens)): current = tokens[i] if isinstance(tokens[i], basestring_): # we don't trim this continue item = current[0] if not statement_re.search(item) and item not in single_statements: continue if not i: prev = '' else: prev = tokens[i - 1] if i + 1 >= len(tokens): next_chunk = '' else: next_chunk = tokens[i + 1] if (not isinstance(next_chunk, basestring_) or not isinstance(prev, basestring_)): continue prev_ok = not prev or trail_whitespace_re.search(prev) if i == 1 and not prev.strip(): prev_ok = True if last_trim is not None and last_trim + 2 == i and not prev.strip(): prev_ok = 'last' if (prev_ok and (not next_chunk or lead_whitespace_re.search( next_chunk) or ( i == len(tokens) - 2 and not next_chunk.strip()))): if prev: if ((i == 1 and not prev.strip()) or prev_ok == 'last'): tokens[i - 1] = '' else: m = trail_whitespace_re.search(prev) # +1 to leave the leading \n on: prev = prev[:m.start() + 1] tokens[i - 1] = prev if next_chunk: last_trim = i if i == len(tokens) - 2 and not next_chunk.strip(): tokens[i + 1] = '' else: m = lead_whitespace_re.search(next_chunk) next_chunk = next_chunk[m.end():] tokens[i + 1] = next_chunk 
return tokens trim_lex.__doc__ = r""" Takes a lexed set of tokens, and removes whitespace when there is a directive on a line by itself: >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False) >>> tokens [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny'] >>> trim_lex(tokens) [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y'] """ if PY3 else r""" Takes a lexed set of tokens, and removes whitespace when there is a directive on a line by itself: >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False) >>> tokens [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny'] >>> trim_lex(tokens) [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y'] """ def find_position(string, index, last_index, last_pos): """ Given a string and index, return (line, column) """ lines = string.count('\n', last_index, index) if lines > 0: column = index - string.rfind('\n', last_index, index) else: column = last_pos[1] + (index - last_index) return (last_pos[0] + lines, column) def parse(s, name=None, line_offset=0, delimeters=None): if delimeters is None: delimeters = (Template.default_namespace['start_braces'], Template.default_namespace['end_braces']) tokens = lex(s, name=name, line_offset=line_offset, delimeters=delimeters) result = [] while tokens: next_chunk, tokens = parse_expr(tokens, name) result.append(next_chunk) return result parse.__doc__ = r""" Parses a string into a kind of AST >>> parse('{{x}}') [('expr', (1, 3), 'x')] >>> parse('foo') ['foo'] >>> parse('{{if x}}test{{endif}}') [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))] >>> parse( ... 'series->{{for x in y}}x={{x}}{{endfor}}' ... ) #doctest: +NORMALIZE_WHITESPACE ['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])] >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}') [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])] >>> parse('{{py:x=1}}') [('py', (1, 3), 'x=1')] >>> parse( ... '{{if x}}a{{elif y}}b{{else}}c{{endif}}' ... 
) #doctest: +NORMALIZE_WHITESPACE [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] Some exceptions:: >>> parse('{{continue}}') Traceback (most recent call last): ... tempita.TemplateError: continue outside of for loop at line 1 column 3 >>> parse('{{if x}}foo') Traceback (most recent call last): ... tempita.TemplateError: No {{endif}} at line 1 column 3 >>> parse('{{else}}') Traceback (most recent call last): ... tempita.TemplateError: else outside of an if block at line 1 column 3 >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}') Traceback (most recent call last): ... tempita.TemplateError: Unexpected endif at line 1 column 25 >>> parse('{{if}}{{endif}}') Traceback (most recent call last): ... tempita.TemplateError: if with no expression at line 1 column 3 >>> parse('{{for x y}}{{endfor}}') Traceback (most recent call last): ... tempita.TemplateError: Bad for (no "in") in 'x y' at line 1 column 3 >>> parse('{{py:x=1\ny=2}}') #doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... tempita.TemplateError: Multi-line py blocks must start with a newline at line 1 column 3 """ if PY3 else r""" Parses a string into a kind of AST >>> parse('{{x}}') [('expr', (1, 3), 'x')] >>> parse('foo') ['foo'] >>> parse('{{if x}}test{{endif}}') [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))] >>> parse( ... 'series->{{for x in y}}x={{x}}{{endfor}}' ... ) #doctest: +NORMALIZE_WHITESPACE ['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])] >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}') [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])] >>> parse('{{py:x=1}}') [('py', (1, 3), 'x=1')] >>> parse( ... '{{if x}}a{{elif y}}b{{else}}c{{endif}}' ... ) #doctest: +NORMALIZE_WHITESPACE [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] Some exceptions:: >>> parse('{{continue}}') Traceback (most recent call last): ... 
def parse_expr(tokens, name, context=()):
    """Parse one token from the lexed template stream.

    Args:
        tokens: list whose items are either plain strings (literal text)
            or ``(expr, position)`` tuples for ``{{...}}`` directives.
        name: template name, used only for error reporting.
        context: tuple of enclosing construct markers (e.g. ``'for'``,
            ``'if'``, ``'def'``) used to validate directive placement.

    Returns:
        ``(node, remaining_tokens)`` where node is either the literal
        string or a tagged tuple such as ``('py', pos, code)``.

    Raises:
        TemplateError: for directives that are malformed or appear
            outside a legal context.
    """
    # Literal text passes through untouched.
    if isinstance(tokens[0], basestring_):
        return tokens[0], tokens[1:]
    expr, pos = tokens[0]
    expr = expr.strip()
    if expr.startswith('py:'):
        # {{py:...}} embedded Python. Multi-line blocks must begin with a
        # newline so their indentation is unambiguous.
        expr = expr[3:].lstrip(' \t')
        if expr.startswith('\n') or expr.startswith('\r'):
            expr = expr.lstrip('\r\n')
            if '\r' in expr:
                # Normalize Windows/Mac line endings inside the code block.
                expr = expr.replace('\r\n', '\n')
                expr = expr.replace('\r', '')
            expr += '\n'
        else:
            if '\n' in expr:
                raise TemplateError(
                    'Multi-line py blocks must start with a newline',
                    position=pos, name=name)
        return ('py', pos, expr), tokens[1:]
    elif expr in ('continue', 'break'):
        # Loop-control directives are only legal inside a {{for}}.
        # NOTE(review): the message says 'continue' even for 'break' —
        # presumably intentional shorthand; confirm before changing.
        if 'for' not in context:
            raise TemplateError(
                'continue outside of for loop',
                position=pos, name=name)
        return (expr, pos), tokens[1:]
    elif expr.startswith('if '):
        return parse_cond(tokens, name, context)
    elif (expr.startswith('elif ')
          or expr == 'else'):
        # elif/else are consumed by parse_cond; seeing one here means it
        # has no matching {{if}}.
        raise TemplateError(
            '%s outside of an if block' % expr.split()[0],
            position=pos, name=name)
    elif expr in ('if', 'elif', 'for'):
        raise TemplateError(
            '%s with no expression' % expr,
            position=pos, name=name)
    elif expr in ('endif', 'endfor', 'enddef'):
        # End markers are consumed by their block parsers; a stray one is
        # unbalanced.
        raise TemplateError(
            'Unexpected %s' % expr,
            position=pos, name=name)
    elif expr.startswith('for '):
        return parse_for(tokens, name, context)
    elif expr.startswith('default '):
        return parse_default(tokens, name, context)
    elif expr.startswith('inherit '):
        return parse_inherit(tokens, name, context)
    elif expr.startswith('def '):
        return parse_def(tokens, name, context)
    elif expr.startswith('#'):
        # {{# comment}} — kept in the tree (with original text) but not
        # rendered.
        return ('comment', pos, tokens[0][0]), tokens[1:]
    # Anything else is a plain substitution expression.
    return ('expr', pos, tokens[0][0]), tokens[1:]
def parse_default(tokens, name, context):
    """Parse a ``{{default var=expression}}`` directive.

    Returns a ``('default', pos, var, expr)`` node plus the remaining
    tokens. Raises TemplateError when the directive is malformed.
    """
    directive, pos = tokens[0]
    assert directive.startswith('default ')
    body = directive.split(None, 1)[1]
    var_part, eq, expr_part = body.partition('=')
    if not eq:
        raise TemplateError(
            "Expression must be {{default var=value}}; no = found in %r" % body,
            position=pos, name=name)
    var = var_part.strip()
    # Only a single simple name may be defaulted; no tuple unpacking.
    if ',' in var:
        raise TemplateError(
            "{{default x, y = ...}} is not supported",
            position=pos, name=name)
    if not var_re.search(var):
        raise TemplateError(
            "Not a valid variable name for {{default}}: %r" % var,
            position=pos, name=name)
    return ('default', pos, var, expr_part.strip()), tokens[1:]
('def', start, func_name, sig, content), tokens[1:] next_chunk, tokens = parse_expr(tokens, name, context) content.append(next_chunk) def parse_signature(sig_text, name, pos): tokens = tokenize.generate_tokens(StringIO(sig_text).readline) sig_args = [] var_arg = None var_kw = None defaults = {} def get_token(pos=False): try: tok_type, tok_string, (srow, scol), (erow, ecol), line = next( tokens) except StopIteration: return tokenize.ENDMARKER, '' if pos: return tok_type, tok_string, (srow, scol), (erow, ecol) else: return tok_type, tok_string while 1: var_arg_type = None tok_type, tok_string = get_token() if tok_type == tokenize.ENDMARKER: break if tok_type == tokenize.OP and ( tok_string == '*' or tok_string == '**'): var_arg_type = tok_string tok_type, tok_string = get_token() if tok_type != tokenize.NAME: raise TemplateError('Invalid signature: (%s)' % sig_text, position=pos, name=name) var_name = tok_string tok_type, tok_string = get_token() if tok_type == tokenize.ENDMARKER or ( tok_type == tokenize.OP and tok_string == ','): if var_arg_type == '*': var_arg = var_name elif var_arg_type == '**': var_kw = var_name else: sig_args.append(var_name) if tok_type == tokenize.ENDMARKER: break continue if var_arg_type is not None: raise TemplateError('Invalid signature: (%s)' % sig_text, position=pos, name=name) if tok_type == tokenize.OP and tok_string == '=': nest_type = None unnest_type = None nest_count = 0 start_pos = end_pos = None parts = [] while 1: tok_type, tok_string, s, e = get_token(True) if start_pos is None: start_pos = s end_pos = e if tok_type == tokenize.ENDMARKER and nest_count: raise TemplateError('Invalid signature: (%s)' % sig_text, position=pos, name=name) if (not nest_count and (tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','))): default_expr = isolate_expression( sig_text, start_pos, end_pos) defaults[var_name] = default_expr sig_args.append(var_name) break parts.append((tok_type, tok_string)) if nest_count \ and 
_fill_command_usage = """\
%prog [OPTIONS] TEMPLATE arg=value

Use py:arg=value to set a Python value; otherwise all values are
strings.
"""


def fill_command(args=None):
    """Command-line entry point: fill TEMPLATE with arg=value pairs.

    Args:
        args: argument list (defaults to ``sys.argv[1:]``).

    Side effects: reads the template file (or stdin for ``-``), writes
    the substituted result to ``--output`` or stdout, and calls
    ``sys.exit(2)`` on bad usage.
    """
    import sys
    import optparse
    import pkg_resources
    import os
    if args is None:
        args = sys.argv[1:]
    dist = pkg_resources.get_distribution('Paste')
    parser = optparse.OptionParser(
        version=coerce_text(dist),
        usage=_fill_command_usage)
    parser.add_option(
        '-o', '--output',
        dest='output',
        metavar="FILENAME",
        help="File to write output to (default stdout)")
    parser.add_option(
        '--html',
        dest='use_html',
        action='store_true',
        help="Use HTML style filling (including automatic HTML quoting)")
    parser.add_option(
        '--env',
        dest='use_env',
        action='store_true',
        help="Put the environment in as top-level variables")
    options, args = parser.parse_args(args)
    if len(args) < 1:
        print('You must give a template filename')
        sys.exit(2)
    template_name = args[0]
    args = args[1:]
    vars = {}
    if options.use_env:
        vars.update(os.environ)
    for value in args:
        if '=' not in value:
            print('Bad argument: %r' % value)
            sys.exit(2)
        name, value = value.split('=', 1)
        if name.startswith('py:'):
            # BUG FIX: was ``name = name[:3]`` which *kept* only the
            # 'py:' prefix; we must strip it to get the variable name.
            name = name[3:]
            # SECURITY NOTE: eval() on a command-line value is by design
            # here (py: explicitly requests Python evaluation), but never
            # feed this untrusted input.
            value = eval(value)
        vars[name] = value
    if template_name == '-':
        template_content = sys.stdin.read()
        template_name = '<stdin>'
    else:
        # BUG FIX: open(..., 'rb', encoding=...) raises ValueError in
        # Python 3 (binary mode takes no encoding); read as latin-1 text.
        with open(template_name, 'r', encoding="latin-1") as f:
            template_content = f.read()
    if options.use_html:
        TemplateClass = HTMLTemplate
    else:
        TemplateClass = Template
    template = TemplateClass(template_content, name=template_name)
    result = template.substitute(vars)
    if options.output:
        # BUG FIX: result is text; writing it to a file opened in 'wb'
        # raises TypeError in Python 3. Use text mode.
        with open(options.output, 'w') as f:
            f.write(result)
    else:
        sys.stdout.write(result)


if __name__ == '__main__':
    fill_command()
class Shred(collections.namedtuple(
        '_Shred',
        ['contour', 'features', 'name', 'piece_fname', 'features_fname',
         'piece_in_context_fname', 'simplified_contour', 'sheet', 'img_roi',
         'tags_suggestions'])):
    """Immutable record for one detected shred.

    Carries the shred's contour data, extracted features, saved-image
    file names, owning sheet id and suggested tags.
    """

    def __repr__(self):
        return '<Shred #{0} of sheet {1}>'.format(self.name, self.sheet)
sheet_name: string, included in shred attributes. Returns: list of Shred instances. """ if self._shreds is None: shreds = [] _, contours, _ = cv2.findContours(self._foreground_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) for i, contour in enumerate(contours): shred = self._make_shred(contour, i, feature_extractors, sheet_name) if shred is not None: shreds.append(shred) self._shreds = shreds return self._shreds def _guess_dpi(self): # Note: this is inconsistent for images with larger y dimension: # if they have exif, dpi is returned for (x, y), otherwise it's (y, x). # Let's assume that we are dealing with A4 (8.27 in x 11.7) and try to # guess it w, h = min(self.orig_img.shape[:2]), max(self.orig_img.shape[:2]) xres, yres = w / 8.27, h / 11.7 # Will suffice for now if max(xres, yres) / min(xres, yres) > 1.1: raise ValueError("Dpi is not provided and can't be guessed.") return int(round(xres, -2)), int(round(yres, -2)) def _detect_scanner_background(self, img): # Method returns a mask describing detected scanner background # (gray borders around colored sheet where shreds are glued). # Problem here is that colored sheet might have borders of different # sizes on different sides of it and we don't know the color of the # sheet. # Also sheets don't always have perfectly straight edges or are # slightly rotated against edges of the scanner. # Idea is relatively simple: # Convert image to LAB and grab A and B channels. fimg = cv2.cvtColor(img, cv2.COLOR_BGR2Lab) _, a_channel, b_channel = cv2.split(fimg) def try_method(fimg, border, aggressive=True): fimg = cv2.copyMakeBorder(fimg, 5, 5, 5, 5, cv2.BORDER_CONSTANT, value=border) # Flood fill supposed background with white (255). if aggressive: cv2.floodFill(fimg, None, (10, 10), 255, 1, 1) else: cv2.floodFill(fimg, None, (10, 10), 255, 2, 2, cv2.FLOODFILL_FIXED_RANGE) # Binarize image into white background and black everything else. 
_, fimg = cv2.threshold(fimg, 254, 255, cv2.THRESH_BINARY) hist = cv2.calcHist([fimg], [0], None, [2], [0, 256]) return fimg, hist options = [ [a_channel, 127], [b_channel, 134], [a_channel, 127, False], [b_channel, 134, False], ] # And then try to add a border of predefined color around each channel # and flood fill scanner background starting from most # aggressive flood fill settings to least aggressive. for i, opt in enumerate(options): fimg, hist = try_method(*opt) # First setting that doesn't flood that doesn't hurt colored sheet # too badly wins. bg_ratio = hist[1] / sum(hist) if bg_ratio < 0.2: break # Then we dilate it a bit. fimg = cv2.morphologyEx(fimg, cv2.MORPH_DILATE, np.ones((5, 5), np.uint8), iterations=2) fimg = fimg[5:-5, 5:-5] # Searching for a biggest outter contour _, contours, _ = cv2.findContours(cv2.bitwise_not(fimg), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) main_contour = np.array(map(cv2.contourArea, contours)).argmax() # build a convex hull for it hull = cv2.convexHull(contours[main_contour]) # And make a mask out of it. fimg = np.zeros(fimg.shape[:2], np.uint8) cv2.fillPoly(fimg, [hull], 255) fimg = cv2.bitwise_not(fimg) # Cannot say I'm satisfied with algo but using grabcut here seems too # expensive. Also it was a lot of fun to build it. return fimg def _find_foreground_mask(self): # Let's build a simple mask to separate pieces from background # just by checking if pixel is in range of some colours m = 0 res = None # Here we calculate mask to separate background of the scanner scanner_bg = self._detect_scanner_background(self.orig_img) # And here we are trying to check different ranges for different # background to find the winner. 
    def _make_shred(self, c, name, feature_extractors, sheet_name):
        """Creates a Shred instances from a given contour.

        Crops the piece out of the sheet, straightens it via the min-area
        rect angle, saves debug images and runs the feature extractors.

        Args:
            c: cv2 contour object.
            name: string shred name within a sheet.
            feature_extractors: iterable of AbstractShredFeature instances.
            sheet_name: string id of the owning sheet, stored on the Shred.

        Returns:
            A new Shred instance or None on failure (piece too small, too
            big, or too long and narrow).
        """
        height, width, channels = self.orig_img.shape
        # Bounding rect of current contour (axis-aligned, sheet coords).
        r_x, r_y, r_w, r_h = cv2.boundingRect(c)
        # Generating simplified contour to use it in html.
        epsilon = 0.01 * cv2.arcLength(c, True)
        simplified_contour = cv2.approxPolyDP(c, epsilon, True)
        # Filter out too small fragments (under ~3mm a side).
        if self.px_to_mm(r_w) <= 3 or self.px_to_mm(r_h) <= 3:
            print("Skipping piece #%s as too small (%spx x %s px)" % (
                name, r_w, r_h))
            return None
        # Filter out too big fragments (over ~100mm both sides).
        if self.px_to_mm(r_w) >= 100 and self.px_to_mm(r_h) >= 100:
            print("Skipping piece #%s as too big (%spx x %s px)" % (
                name, r_w, r_h))
            return None
        # Position of rect of min area.
        # This will provide us angle to straighten image.
        box_center, bbox, angle = cv2.minAreaRect(c)
        # We want our pieces to be "vertical": swap sides so width <= height.
        if bbox[0] > bbox[1]:
            angle += 90
            bbox = (bbox[1], bbox[0])
        # Reject extreme aspect ratios (likely scanning artifacts).
        if bbox[1] / float(bbox[0]) > 70:
            print("Skipping piece #%s as too too long and narrow" % name)
            return None
        # Coords of region of interest using which we should crop piece
        # after rotation.
        y1 = math.floor(box_center[1] - bbox[1] / 2)
        x1 = math.floor(box_center[0] - bbox[0] / 2)
        bbox = tuple(map(int, map(math.ceil, bbox)))
        # A mask we use to show only piece we are currently working on.
        piece_mask = np.zeros([height, width, 1], dtype=np.uint8)
        cv2.drawContours(piece_mask, [c], -1, 255, cv2.FILLED)
        # Apply mask to original image.
        img_crp = self.orig_img[r_y:r_y + r_h, r_x:r_x + r_w]
        # Save a context image with a 10px margin around the piece.
        piece_in_context = self.save_image(
            "pieces/%s_ctx" % name,
            self.orig_img[max(r_y - 10, 0):r_y + r_h + 10,
                          max(r_x - 10, 0):r_x + r_w + 10])
        mask = piece_mask[r_y:r_y + r_h, r_x:r_x + r_w]
        img_roi = cv2.bitwise_and(img_crp, img_crp, mask=mask)
        # Add alpha layer and set it to the mask.
        img_roi = cv2.cvtColor(img_roi, cv2.COLOR_BGR2BGRA)
        img_roi[:, :, 3] = mask[:, :, 0]
        # Straighten it.
        # Because we crop original image before rotation we save us some
        # memory and a lot of time but we need to adjust coords of the
        # center of new min area rect.
        M = cv2.getRotationMatrix2D((box_center[0] - r_x,
                                     box_center[1] - r_y), angle, 1)
        # And translate an image a bit to make it fit to the bbox again.
        # This is done with direct editing of the transform matrix.
        # (Wooohoo, I know matrix-fu)
        M[0][2] += r_x - x1
        M[1][2] += r_y - y1
        # Apply rotation/transform/crop.
        img_roi = cv2.warpAffine(img_roi, M, bbox)
        piece_fname = self.save_image("pieces/%s" % name, img_roi, "png")
        # FEATURES MAGIC BELOW
        #
        # Get our mask/contour back after the transform.
        _, _, _, mask = cv2.split(img_roi)
        _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)
        # NOTE(review): contours[0] below will raise IndexError if the
        # transform produced no contours at all — confirm this cannot
        # happen in practice before relying on it.
        if len(contours) != 1:
            print("Piece #%s has strange contours after transform" % name)
        cnt = contours[0]
        features_fname = self.save_image("pieces/%s_mask" % name, mask,
                                         "png")
        base_features = {
            # On_sheet_* features describe the min counding box on the
            # sheet.
            "on_sheet_x": r_x,
            "on_sheet_y": r_y,
            "on_sheet_width": r_w,
            "on_sheet_height": r_h,
            "on_sheet_angle": angle,
            "width": img_roi.shape[1],
            "height": img_roi.shape[0],
        }
        tags_suggestions = []
        # Let each extractor add its features and tag suggestions.
        for feat in feature_extractors:
            fts, tags = feat.get_info(img_roi, cnt, name)
            base_features.update(fts)
            tags_suggestions += tags
        if tags_suggestions:
            print(name, tags_suggestions)
        return Shred(
            contour=c,
            features=base_features,
            features_fname=features_fname,
            img_roi=img_roi,
            name=name,
            piece_fname=piece_fname,
            piece_in_context_fname=piece_in_context,
            sheet=sheet_name,
            simplified_contour=simplified_contour,
            tags_suggestions=tags_suggestions,
        )
def run(func, env):
    """Assemble ckernels for all jittable nodes of *func*.

    No-op (returns None) unless the environment strategy is 'jit';
    otherwise identifies jittable nodes, fuses them into kernel trees,
    rewrites the roots into ckernels and returns ``(func, env)``.
    """
    if env['strategy'] == 'jit':
        # Identify the nodes to JIT.
        jit_kernels = identify_jitnodes(func, env)
        # Build up trees for all those nodes.
        kernel_trees, tree_args = build_kerneltrees(func, jit_kernels)
        # JIT compile the nodes with maximal trees, deleting all their
        # children.
        build_ckernels(func, jit_kernels, kernel_trees, tree_args)
        return func, env
    return None
""" fuser = BuildKernelTrees(func, jitted) visit(fuser, func) return fuser.trees, fuser.arguments def build_ckernels(func, jitted, trees, arguments): transformer = BuildCKernels(func, jitted, trees, arguments) transform(transformer, func) # Delete dead ops in reverse dominating order, so as to only delete ops # with 0 live uses for op in reversed(transformer.delete_later): op.delete() #------------------------------------------------------------------------ # Jit kernels #------------------------------------------------------------------------ class IdentifyJitKernels(object): """ Determine which kernels may be jitted. Produces a dict `self.jitted` that maps Operations to BlazeElementKernels implementations. """ def __init__(self, func, env): self.func = func self.env = env self.strategies = self.env['strategies'] self.overloads = self.env['kernel.overloads'] self.jitted = {} def op_kernel(self, op): strategy = self.strategies[op] if strategy != 'jit': return overload = self.overloads.get((op, strategy)) if overload is not None: py_func, signature = overload blaze_element_kernel = construct_blaze_kernel(py_func, signature) self.jitted[op] = blaze_element_kernel def construct_blaze_kernel(py_func, signature): """ Parameters ========== function: blaze.function.BlazeFunc overload: blaze.overloading.Overload """ nb_argtypes = [to_numba(a.measure) for a in signature.argtypes] nb_restype = to_numba(signature.restype.measure) return frompyfunc(py_func, (nb_argtypes, nb_restype), signature.argtypes) class IdentifyJitConvertors(object): """ Determine which conversion operators should be jitted. 
def leaf_arg(type):
    """Wrap a blaze type as a scalar leaf Argument for a KernelTree."""
    llvm_ty = to_numba(type.measure).to_llvm()
    # Leaves are scalars: SCALAR kind, rank 0.
    return Argument(type, llvm_array.SCALAR, 0, llvm_ty)
elif arg in self.jitted: # TODO: the original code here doesn't work, shuffling things # around introduce unexpected LLVM bitcast exceptions raise RuntimeError('internal error in blaze JIT code with arg %r', arg) else: # This argument is a non-jittable kernel, add it as a leaf node if not all(c.metadata['elementwise'] for c in consumers if c.opcode == 'kernel'): raise NotImplementedError( "We have non-elementwise consumers that we don't know " "how to deal with") tree = leaf_arg(arg.type) self.trees[arg] = tree self.arguments[arg] = [(arg, tree)] self.arguments[op].append((arg, tree)) children.append(tree) self.trees[op] = KernelTree(elementkernel, children) #------------------------------------------------------------------------ # Rewrite to CKernels #------------------------------------------------------------------------ class BuildCKernels(object): def __init__(self, func, jitted, trees, arguments): self.func = func self.jitted = jitted self.trees = trees self.arguments = arguments self.delete_later = [] # Ops to delete afterwards def op_convert(self, op): if op.args[0] in self.trees and op in self.jitted: uses = self.func.uses[op] if all(u in self.jitted for u in uses): self.delete_later.append(op) def op_kernel(self, op): if op not in self.jitted: return op uses = self.func.uses[op] if all(u in self.trees for u in uses): # All our consumers know about us and have us as an argument # in their tree! Delete this op, only the root will perform a # rewrite. self.delete_later.append(op) elif any(u in self.trees for u in uses): # Some consumers have us as a node, but others don't. This # forms a ckernel boundary, so we need to detach ourselves! 
def converter(blaze_dtype, blaze_argtype):
    """
    Generate an element-wise conversion function that numba can
    jit-compile, casting *blaze_argtype* values to *blaze_dtype*.
    """
    target = to_numba(blaze_dtype.measure)
    source = to_numba(blaze_argtype.measure)

    @jit(target(source))
    def convert(value):
        return target(value)

    return convert
#!/usr/bin/env python2.7 # Copyright 2015 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generates the appropriate JSON data for LB interop test scenarios.""" import json import os import yaml all_scenarios = [] # TODO(https://github.com/grpc/grpc-go/issues/2347): enable # client_falls_back_because_no_backends_* scenarios for Java/Go. # TODO(https://github.com/grpc/grpc-java/issues/4887): enable # *short_stream* scenarios for Java. # TODO(https://github.com/grpc/grpc-java/issues/4912): enable # Java TLS tests involving TLS to the balancer. 
def server_sec(transport_sec):
    """Map a client transport security mode to per-server security modes.

    Returns a (balancer_sec, backend_sec, fallback_sec) triple. When the
    client uses google_default_credentials, the balancer and backends are
    reached via ALTS while the fallback servers use TLS; every other mode
    is used verbatim for all three server types.
    """
    if transport_sec == 'google_default_credentials':
        return 'alts', 'alts', 'tls'
    return transport_sec, transport_sec, transport_sec


# All client transport security modes exercised by the scenarios.
_ALL_TRANSPORT_SECS = [
    'insecure', 'alts', 'tls', 'google_default_credentials'
]

# Secure-only subset, used by scenarios that rely on a deliberately
# insecure server breaking the secure handshake.
_SECURE_TRANSPORT_SECS = ['alts', 'tls', 'google_default_credentials']


def _java_skips(transport_sec, balancer_short_stream):
    """Langs to skip for one scenario (see the TODOs at the top of file).

    Java is skipped for TLS-to-balancer scenarios and for short-stream
    scenarios. A scenario hitting both conditions lists 'java' twice --
    this duplication is preserved from the original generators so the
    emitted YAML is byte-for-byte identical.
    """
    skip_langs = []
    if transport_sec == 'tls':
        skip_langs += ['java']
    if balancer_short_stream:
        skip_langs += ['java']
    return skip_langs


def _scenario(name,
              skip_langs,
              transport_sec,
              balancer_configs,
              backend_configs,
              fallback_configs,
              cause_no_error_no_data_for_balancer_a_record=False):
    """Assemble one scenario dict in the schema the test runner consumes."""
    return {
        'name': name,
        'skip_langs': skip_langs,
        'transport_sec': transport_sec,
        'balancer_configs': balancer_configs,
        'backend_configs': backend_configs,
        'fallback_configs': fallback_configs,
        'cause_no_error_no_data_for_balancer_a_record':
            cause_no_error_no_data_for_balancer_a_record,
    }


def generate_no_balancer_because_lb_a_record_returns_nx_domain():
    """Balancer A record resolves NXDOMAIN; client must use fallbacks."""
    all_configs = []
    for transport_sec in _ALL_TRANSPORT_SECS:
        _, _, fallback_sec = server_sec(transport_sec)
        all_configs.append(
            _scenario(
                'no_balancer_because_lb_a_record_returns_nx_domain_%s' %
                transport_sec,
                skip_langs=[],
                transport_sec=transport_sec,
                balancer_configs=[],
                backend_configs=[],
                fallback_configs=[{
                    'transport_sec': fallback_sec,
                }]))
    return all_configs


all_scenarios += generate_no_balancer_because_lb_a_record_returns_nx_domain()


def generate_no_balancer_because_lb_a_record_returns_no_data():
    """Balancer A record resolves empty; client must use fallbacks."""
    all_configs = []
    for transport_sec in _ALL_TRANSPORT_SECS:
        _, _, fallback_sec = server_sec(transport_sec)
        all_configs.append(
            _scenario(
                'no_balancer_because_lb_a_record_returns_no_data_%s' %
                transport_sec,
                skip_langs=[],
                transport_sec=transport_sec,
                balancer_configs=[],
                backend_configs=[],
                fallback_configs=[{
                    'transport_sec': fallback_sec,
                }],
                cause_no_error_no_data_for_balancer_a_record=True))
    return all_configs


all_scenarios += generate_no_balancer_because_lb_a_record_returns_no_data()


def generate_client_referred_to_backend():
    """Balancer refers the client to a single working backend."""
    all_configs = []
    for balancer_short_stream in [True, False]:
        for transport_sec in _ALL_TRANSPORT_SECS:
            balancer_sec, backend_sec, _ = server_sec(transport_sec)
            all_configs.append(
                _scenario(
                    'client_referred_to_backend_%s_short_stream_%s' %
                    (transport_sec, balancer_short_stream),
                    skip_langs=_java_skips(transport_sec,
                                           balancer_short_stream),
                    transport_sec=transport_sec,
                    balancer_configs=[{
                        'transport_sec': balancer_sec,
                        'short_stream': balancer_short_stream,
                    }],
                    backend_configs=[{
                        'transport_sec': backend_sec,
                    }],
                    fallback_configs=[]))
    return all_configs


all_scenarios += generate_client_referred_to_backend()


def generate_client_referred_to_backend_fallback_broken():
    """Backend referral succeeds while the fallback servers are broken
    (they run insecure servers so a secure handshake fails)."""
    all_configs = []
    for balancer_short_stream in [True, False]:
        for transport_sec in _SECURE_TRANSPORT_SECS:
            balancer_sec, backend_sec, _ = server_sec(transport_sec)
            all_configs.append(
                _scenario(
                    'client_referred_to_backend_fallback_broken_%s_short_stream_%s'
                    % (transport_sec, balancer_short_stream),
                    skip_langs=_java_skips(transport_sec,
                                           balancer_short_stream),
                    transport_sec=transport_sec,
                    balancer_configs=[{
                        'transport_sec': balancer_sec,
                        'short_stream': balancer_short_stream,
                    }],
                    backend_configs=[{
                        'transport_sec': backend_sec,
                    }],
                    fallback_configs=[{
                        'transport_sec': 'insecure',
                    }]))
    return all_configs


all_scenarios += generate_client_referred_to_backend_fallback_broken()


def generate_client_referred_to_backend_multiple_backends():
    """Balancer refers the client to five backends."""
    all_configs = []
    for balancer_short_stream in [True, False]:
        for transport_sec in _ALL_TRANSPORT_SECS:
            balancer_sec, backend_sec, _ = server_sec(transport_sec)
            all_configs.append(
                _scenario(
                    'client_referred_to_backend_multiple_backends_%s_short_stream_%s'
                    % (transport_sec, balancer_short_stream),
                    skip_langs=_java_skips(transport_sec,
                                           balancer_short_stream),
                    transport_sec=transport_sec,
                    balancer_configs=[{
                        'transport_sec': balancer_sec,
                        'short_stream': balancer_short_stream,
                    }],
                    # distinct dicts (not [...] * 5) so yaml.dump does not
                    # collapse them into anchor/alias references
                    backend_configs=[{
                        'transport_sec': backend_sec,
                    } for _ in range(5)],
                    fallback_configs=[]))
    return all_configs


all_scenarios += generate_client_referred_to_backend_multiple_backends()


def generate_client_falls_back_because_no_backends():
    """Balancer answers but offers no backends; client falls back."""
    all_configs = []
    for balancer_short_stream in [True, False]:
        for transport_sec in _ALL_TRANSPORT_SECS:
            balancer_sec, _, fallback_sec = server_sec(transport_sec)
            all_configs.append(
                _scenario(
                    'client_falls_back_because_no_backends_%s_short_stream_%s'
                    % (transport_sec, balancer_short_stream),
                    # TODO(grpc-go#2347, grpc-java#4887): currently skipped
                    # for Go and Java; _java_skips may append a duplicate
                    # 'java', preserved from the original output.
                    skip_langs=['go', 'java'] +
                    _java_skips(transport_sec, balancer_short_stream),
                    transport_sec=transport_sec,
                    balancer_configs=[{
                        'transport_sec': balancer_sec,
                        'short_stream': balancer_short_stream,
                    }],
                    backend_configs=[],
                    fallback_configs=[{
                        'transport_sec': fallback_sec,
                    }]))
    return all_configs


all_scenarios += generate_client_falls_back_because_no_backends()


def generate_client_falls_back_because_balancer_connection_broken():
    """Secure client cannot handshake with an insecure balancer; falls back."""
    all_configs = []
    for transport_sec in _SECURE_TRANSPORT_SECS:
        _, _, fallback_sec = server_sec(transport_sec)
        all_configs.append(
            _scenario(
                'client_falls_back_because_balancer_connection_broken_%s' %
                transport_sec,
                skip_langs=['java'] if transport_sec == 'tls' else [],
                transport_sec=transport_sec,
                balancer_configs=[{
                    'transport_sec': 'insecure',
                    'short_stream': False,
                }],
                backend_configs=[],
                fallback_configs=[{
                    'transport_sec': fallback_sec,
                }]))
    return all_configs


all_scenarios += generate_client_falls_back_because_balancer_connection_broken()


def generate_client_referred_to_backend_multiple_balancers():
    """Five balancers all refer the client to the same backend."""
    all_configs = []
    for balancer_short_stream in [True, False]:
        for transport_sec in _ALL_TRANSPORT_SECS:
            balancer_sec, backend_sec, _ = server_sec(transport_sec)
            all_configs.append(
                _scenario(
                    'client_referred_to_backend_multiple_balancers_%s_short_stream_%s'
                    % (transport_sec, balancer_short_stream),
                    skip_langs=_java_skips(transport_sec,
                                           balancer_short_stream),
                    transport_sec=transport_sec,
                    # distinct dicts (not [...] * 5) so yaml.dump does not
                    # collapse them into anchor/alias references
                    balancer_configs=[{
                        'transport_sec': balancer_sec,
                        'short_stream': balancer_short_stream,
                    } for _ in range(5)],
                    backend_configs=[{
                        'transport_sec': backend_sec,
                    }],
                    fallback_configs=[]))
    return all_configs


all_scenarios += generate_client_referred_to_backend_multiple_balancers()

print((yaml.dump({
    'lb_interop_test_scenarios': all_scenarios,
})))
#!/usr/bin/env python # encoding: utf-8 # # Copyright 2011 Disney Enterprises, Inc. All rights reserved # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * The names "Disney", "Walt Disney Pictures", "Walt Disney Animation # Studios" or the names of its contributors may NOT be used to # endorse or promote products derived from this software without # specific prior written permission from Walt Disney Pictures. # Disclaimer: THIS SOFTWARE IS PROVIDED BY WALT DISNEY PICTURES AND # CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE, NONINFRINGEMENT AND TITLE ARE DISCLAIMED. # IN NO EVENT SHALL WALT DISNEY PICTURES, THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND BASED ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. """ reposadocommon.py Created by Greg Neagle on 2011-03-03. 
""" import sys import os import imp import plistlib import time import urlparse import warnings from xml.parsers.expat import ExpatError def get_main_dir(): '''Returns the directory name of the script or the directory name of the exe if py2exe was used Code from http://www.py2exe.org/index.cgi/HowToDetermineIfRunningFromExe ''' if (hasattr(sys, "frozen") or hasattr(sys, "importers") or imp.is_frozen("__main__")): return os.path.dirname(sys.executable) return os.path.dirname(sys.argv[0]) def prefsFilePath(): '''Returns path to our preferences file.''' return os.path.join(get_main_dir(), 'preferences.plist') def pref(prefname): '''Returns a preference.''' default_prefs = { 'AppleCatalogURLs': [ ('http://swscan.apple.com/content/catalogs/' 'index.sucatalog'), ('http://swscan.apple.com/content/catalogs/' 'index-1.sucatalog'), ('http://swscan.apple.com/content/catalogs/others/' 'index-leopard.merged-1.sucatalog'), ('http://swscan.apple.com/content/catalogs/others/' 'index-leopard-snowleopard.merged-1.sucatalog'), ('http://swscan.apple.com/content/catalogs/others/' 'index-lion-snowleopard-leopard.merged-1.sucatalog'), ('http://swscan.apple.com/content/catalogs/others/' 'index-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog'), ('http://swscan.apple.com/content/catalogs/others/' 'index-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog') ], 'PreferredLocalizations': ['English', 'en'], 'CurlPath': '/usr/bin/curl' } try: prefs = plistlib.readPlist(prefsFilePath()) except (IOError, ExpatError): prefs = default_prefs if prefname in prefs: return prefs[prefname] elif prefname in default_prefs: return default_prefs[prefname] else: return None def validPreferences(): '''Validates our preferences to make sure needed values are defined and paths exist. Returns boolean.''' prefs_valid = True for pref_name in ['UpdatesRootDir', 'UpdatesMetadataDir']: preference = pref(pref_name) if not preference: print_stderr('ERROR: %s is not defined in %s.' 
% (pref_name, prefsFilePath())) prefs_valid = False elif not os.path.exists(preference): print_stderr('WARNING: %s "%s" does not exist.' ' Will attempt to create it.' % (pref_name, preference)) return prefs_valid def configure_prefs(): """Configures prefs for use""" _prefs = {} keysAndPrompts = [ ('UpdatesRootDir', 'Filesystem path to store replicated catalogs and updates'), ('UpdatesMetadataDir', 'Filesystem path to store Reposado metadata'), ('LocalCatalogURLBase', 'Base URL for your local Software Update Service\n(Example: ' 'http://su.your.org -- leave empty if you are not replicating ' 'updates)'), ] if not os.path.exists(pref('CurlPath')): keysAndPrompts.append( ('CurlPath', 'Path to curl tool (Example: /usr/bin/curl)')) for (key, prompt) in keysAndPrompts: newvalue = raw_input('%15s [%s]: ' % (prompt, pref(key))) _prefs[key] = newvalue or pref(key) or '' prefspath = prefsFilePath() # retrieve current preferences try: prefs = plistlib.readPlist(prefspath) except (IOError, ExpatError): prefs = {} # merge edited preferences for key in _prefs.keys(): prefs[key] = _prefs[key] # write preferences to our file try: plistlib.writePlist(prefs, prefspath) except (IOError, ExpatError): print_stderr('Could not save configuration to %s', prefspath) else: # check to make sure they're valid unused_value = validPreferences() def str_to_ascii(s): """Given str (unicode, latin-1, or not) return ascii. 
Args: s: str, likely in Unicode-16BE, UTF-8, or Latin-1 charset Returns: str, ascii form, no >7bit chars """ try: return unicode(s).encode('ascii', 'ignore') except UnicodeDecodeError: return s.decode('ascii', 'ignore') def concat_message(msg, *args): """Concatenates a string with any additional arguments; drops unicode.""" msg = str_to_ascii(msg) if args: args = [str_to_ascii(arg) for arg in args] try: msg = msg % tuple(args) except TypeError: warnings.warn( 'String format does not match concat args: %s' % ( str(sys.exc_info()))) return msg def log(msg): """Generic logging function""" # date/time format string if not LOGFILE: return formatstr = '%b %d %H:%M:%S' try: fileobj = open(LOGFILE, mode='a', buffering=1) try: print >> fileobj, time.strftime(formatstr), msg.encode('UTF-8') except (OSError, IOError): pass fileobj.close() except (OSError, IOError): pass def print_stdout(msg, *args): """ Prints message and args to stdout. """ output = concat_message(msg, *args) if LOGFILE: log(output) else: print output sys.stdout.flush() def print_stderr(msg, *args): """ Prints message and args to stderr. 
""" output = concat_message(msg, *args) if LOGFILE: log(output) else: print >> sys.stderr, concat_message(msg, *args) def writeDataToPlist(data, filename): '''Writes a dict or list to a plist in our metadata dir''' metadata_dir = pref('UpdatesMetadataDir') if not os.path.exists(metadata_dir): try: os.makedirs(metadata_dir) except OSError, errmsg: print_stderr( 'Could not create missing %s because %s', metadata_dir, errmsg) try: plistlib.writePlist(data, os.path.join(metadata_dir, filename)) except (IOError, OSError, TypeError), errmsg: print_stderr( 'Could not write %s because %s', filename, errmsg) def getDataFromPlist(filename): '''Reads data from a plist in our metadata dir''' metadata_dir = pref('UpdatesMetadataDir') try: return plistlib.readPlist( os.path.join(metadata_dir, filename)) except (IOError, ExpatError): return {} def getDownloadStatus(): '''Reads download status info from disk''' return getDataFromPlist('DownloadStatus.plist') def writeDownloadStatus(download_status_list): '''Writes download status info to disk''' writeDataToPlist(download_status_list, 'DownloadStatus.plist') def getCatalogBranches(): '''Reads catalog branches info from disk''' return getDataFromPlist('CatalogBranches.plist') def writeCatalogBranches(catalog_branches): '''Writes catalog branches info to disk''' writeDataToPlist(catalog_branches, 'CatalogBranches.plist') def getProductInfo(): '''Reads Software Update product info from disk''' return getDataFromPlist('ProductInfo.plist') def writeProductInfo(product_info_dict): '''Writes Software Update product info to disk''' writeDataToPlist(product_info_dict, 'ProductInfo.plist') def getFilenameFromURL(url): '''Gets the filename from a URL''' (unused_scheme, unused_netloc, path, unused_query, unused_fragment) = urlparse.urlsplit(url) return os.path.basename(path) def getLocalPathNameFromURL(url, root_dir=None): '''Derives the appropriate local path name based on the URL''' if root_dir == None: root_dir = pref('UpdatesRootDir') 
(unused_scheme, unused_netloc, path, unused_query, unused_fragment) = urlparse.urlsplit(url) relative_path = path.lstrip('/') return os.path.join(root_dir, relative_path) def rewriteOneURL(full_url): '''Rewrites a single URL to point to our local replica''' our_base_url = pref('LocalCatalogURLBase') if not full_url.startswith(our_base_url): # only rewrite the URL if needed (unused_scheme, unused_netloc, path, unused_query, unused_fragment) = urlparse.urlsplit(full_url) return our_base_url + path else: return full_url def rewriteURLsForProduct(product): '''Rewrites the URLs for a product''' if 'ServerMetadataURL' in product: product['ServerMetadataURL'] = rewriteOneURL( product['ServerMetadataURL']) for package in product.get('Packages', []): if 'URL' in package: package['URL'] = rewriteOneURL(package['URL']) if 'MetadataURL' in package: package['MetadataURL'] = rewriteOneURL( package['MetadataURL']) # workaround for 10.8.2 issue where client ignores local pkg # and prefers Apple's URL. Need to revisit as we better understand this # issue if 'Digest' in package: # removing the Digest causes 10.8.2 to use the replica's URL # instead of Apple's del package['Digest'] distributions = product['Distributions'] for dist_lang in distributions.keys(): distributions[dist_lang] = rewriteOneURL( distributions[dist_lang]) def rewriteURLs(catalog): '''Rewrites all the URLs in the given catalog to point to our local replica''' if pref('LocalCatalogURLBase') == None: return if 'Products' in catalog: product_keys = list(catalog['Products'].keys()) for product_key in product_keys: product = catalog['Products'][product_key] rewriteURLsForProduct(product) def writeAllBranchCatalogs(): '''Writes out all branch catalogs. Used when we edit branches.''' for catalog_URL in pref('AppleCatalogURLs'): localcatalogpath = getLocalPathNameFromURL(catalog_URL) if os.path.exists(localcatalogpath): writeBranchCatalogs(localcatalogpath) else: print_stderr( 'WARNING: %s does not exist. 
Perhaps you need to run repo_sync?' % localcatalogpath) def writeBranchCatalogs(localcatalogpath): '''Writes our branch catalogs''' catalog = plistlib.readPlist(localcatalogpath) downloaded_products = catalog['Products'] product_info = getProductInfo() localcatalogname = os.path.basename(localcatalogpath) # now strip the '.sucatalog' bit from the name # so we can use it to construct our branch catalog names if localcatalogpath.endswith('.sucatalog'): localcatalogpath = localcatalogpath[0:-10] # now write filtered catalogs (branches) catalog_branches = getCatalogBranches() for branch in catalog_branches.keys(): branchcatalogpath = localcatalogpath + '_' + branch + '.sucatalog' print_stdout('Building %s...' % os.path.basename(branchcatalogpath)) # embed branch catalog name into the catalog for troubleshooting # and validation catalog['_CatalogName'] = os.path.basename(branchcatalogpath) catalog['Products'] = {} for product_key in catalog_branches[branch]: if product_key in downloaded_products.keys(): # add the product to the Products dict # for this catalog catalog['Products'][product_key] = \ downloaded_products[product_key] elif pref('LocalCatalogURLBase') and product_key in product_info: # Product has probably been deprecated by Apple, # so we're using cached product info # First check to see if this product was ever in this # catalog original_catalogs = product_info[product_key].get( 'OriginalAppleCatalogs', []) for original_catalog in original_catalogs: if original_catalog.endswith(localcatalogname): # this item was originally in this catalog, so # we can add it to the branch catalog_entry = \ product_info[product_key].get('CatalogEntry') title = product_info[product_key].get('title') version = product_info[product_key].get('version') if catalog_entry: print_stderr( 'WARNING: Product %s (%s-%s) in branch %s ' 'has been deprecated. 
Will use cached info ' 'and packages.', product_key, title, version, branch) rewriteURLsForProduct(catalog_entry) catalog['Products'][product_key] = catalog_entry continue else: if pref('LocalCatalogURLBase') : print_stderr( 'WARNING: Product %s not added to branch %s of %s. ' 'It is not in the corresponding Apple catalogs ' 'and is not in the ProductInfo cache.', product_key, branch, localcatalogname) else: print_stderr( 'WARNING: Product %s not added to branch %s of %s. ' 'It is not in the corresponding Apple catalog.', product_key, branch, localcatalogname) plistlib.writePlist(catalog, branchcatalogpath) def writeAllLocalCatalogs(): '''Writes out all local and branch catalogs. Used when we purge products.''' for catalog_URL in pref('AppleCatalogURLs'): localcatalogpath = getLocalPathNameFromURL(catalog_URL) + '.apple' if os.path.exists(localcatalogpath): writeLocalCatalogs(localcatalogpath) def writeLocalCatalogs(applecatalogpath): '''Writes our local catalogs based on the Apple catalog''' catalog = plistlib.readPlist(applecatalogpath) # rewrite the URLs within the catalog to point to the items on our # local server instead of Apple's rewriteURLs(catalog) # remove the '.apple' from the end of the localcatalogpath if applecatalogpath.endswith('.apple'): localcatalogpath = applecatalogpath[0:-6] else: localcatalogpath = applecatalogpath print_stdout('Building %s...' 
% os.path.basename(localcatalogpath)) catalog['_CatalogName'] = os.path.basename(localcatalogpath) downloaded_products_list = getDownloadStatus() downloaded_products = {} product_keys = list(catalog['Products'].keys()) # filter Products, removing those that haven't been downloaded for product_key in product_keys: if product_key in downloaded_products_list: downloaded_products[product_key] = \ catalog['Products'][product_key] else: print_stderr('WARNING: did not add product %s to ' 'catalog %s because it has not been downloaded.', product_key, os.path.basename(applecatalogpath)) catalog['Products'] = downloaded_products # write raw (unstable/development) catalog # with all downloaded Apple updates enabled plistlib.writePlist(catalog, localcatalogpath) # now write filtered catalogs (branches) based on this catalog writeBranchCatalogs(localcatalogpath) LOGFILE = None def main(): '''Placeholder''' pass if __name__ == '__main__': main()
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Test Analog Value COV Services ------------------------------ """ import unittest from bacpypes.debugging import bacpypes_debugging, ModuleLogger from bacpypes.apdu import ( SubscribeCOVRequest, SimpleAckPDU, ConfirmedCOVNotificationRequest, UnconfirmedCOVNotificationRequest, ) from bacpypes.service.cov import ChangeOfValueServices from bacpypes.local.device import LocalDeviceObject from bacpypes.object import AnalogValueObject from .helpers import ApplicationNetwork, ApplicationStateMachine, COVTestClientServices # some debugging _debug = 0 _log = ModuleLogger(globals()) @bacpypes_debugging class TestAnalogValue(unittest.TestCase): def test_8_10_1(self): """Confirmed Notifications Subscription""" if _debug: TestAnalogValue._debug("test_8_10_1") # create a network anet = ApplicationNetwork("test_8_10_1") # add the ability to accept COV notifications to the TD anet.td.add_capability(COVTestClientServices) # tell the TD how to respond to confirmed notifications anet.td.test_ack = True anet.td.test_reject = None anet.td.test_abort = None # add the service capability to the IUT anet.iut.add_capability(ChangeOfValueServices) # make an analog value object test_av = AnalogValueObject( objectIdentifier=('analogValue', 1), objectName='av', presentValue=0.0, statusFlags=[0, 0, 0, 0], covIncrement=10.0, ) # add it to the implementation anet.iut.add_object(test_av) # wait for the subscription anet.iut.start_state.doc("8.10.1-1-0") \ .receive(SubscribeCOVRequest).doc("8.10.1-1-1") \ .success() # send the subscription, wait for the ack anet.td.start_state.doc("8.10.1-2-0") \ .send(SubscribeCOVRequest( destination=anet.iut.address, subscriberProcessIdentifier=1, monitoredObjectIdentifier=('analogValue', 1), issueConfirmedNotifications=True, lifetime=30, )).doc("8.10.1-2-1") \ .receive(SimpleAckPDU).doc("8.10.1-2-2") \ .success() # run the group anet.run() def test_8_10_2(self): """Unconfirmed Notifications Subscription""" if 
_debug: TestAnalogValue._debug("test_8_10_2") # create a network anet = ApplicationNetwork("test_8_10_2") # add the ability to accept COV notifications to the TD anet.td.add_capability(COVTestClientServices) # tell the TD how to respond to confirmed notifications anet.td.test_ack = True anet.td.test_reject = None anet.td.test_abort = None # add the service capability to the IUT anet.iut.add_capability(ChangeOfValueServices) # make an analog value object test_av = AnalogValueObject( objectIdentifier=('analogValue', 1), objectName='av', presentValue=0.0, statusFlags=[0, 0, 0, 0], covIncrement=10.0, ) # add it to the implementation anet.iut.add_object(test_av) # wait for the subscription anet.iut.start_state.doc("8.10.2-1-0") \ .receive(SubscribeCOVRequest).doc("8.10.2-1-1") \ .success() # send the subscription, wait for the ack anet.td.start_state.doc("8.10.2-2-0") \ .send(SubscribeCOVRequest( destination=anet.iut.address, subscriberProcessIdentifier=1, monitoredObjectIdentifier=('analogValue', 1), issueConfirmedNotifications=False, lifetime=30, )).doc("8.10.2-2-1") \ .receive(SimpleAckPDU).doc("8.10.2-2-2") \ .success() # run the group, cut the time limit short anet.run(time_limit=5.0) # check that the IUT still has the detection if _debug: TestAnalogValue._debug(" - detections: %r", anet.iut.cov_detections) assert len(anet.iut.cov_detections) == 1 # pop out the subscription list and criteria obj_ref, criteria = anet.iut.cov_detections.popitem() if _debug: TestAnalogValue._debug(" - criteria: %r", criteria) # get the list of subscriptions from the criteria subscriptions = criteria.cov_subscriptions.cov_subscriptions if _debug: TestAnalogValue._debug(" - subscriptions: %r", subscriptions) assert len(subscriptions) == 1 def test_8_10_3(self): """Canceling a Subscription""" if _debug: TestAnalogValue._debug("test_8_10_3") # create a network anet = ApplicationNetwork("test_8_10_3") # add the ability to accept COV notifications to the TD 
anet.td.add_capability(COVTestClientServices) # tell the TD how to respond to confirmed notifications anet.td.test_ack = True anet.td.test_reject = None anet.td.test_abort = None # add the service capability to the IUT anet.iut.add_capability(ChangeOfValueServices) # make an analog value object test_av = AnalogValueObject( objectIdentifier=('analogValue', 1), objectName='av', presentValue=0.0, statusFlags=[0, 0, 0, 0], covIncrement=10.0, ) # add it to the implementation anet.iut.add_object(test_av) # wait for the subscription, then for the cancelation anet.iut.start_state.doc("8.10.3-1-0") \ .receive(SubscribeCOVRequest).doc("8.10.3-1-1") \ .receive(SubscribeCOVRequest).doc("8.10.3-1-2") \ .success() # send the subscription, wait for the ack, then send the cancelation # and wait for the ack. Ignore the notification that is sent when # after the subscription anet.td.start_state.doc("8.10.3-2-0") \ .send(SubscribeCOVRequest( destination=anet.iut.address, subscriberProcessIdentifier=1, monitoredObjectIdentifier=('analogValue', 1), issueConfirmedNotifications=False, lifetime=30, )).doc("8.10.3-2-1") \ .ignore(UnconfirmedCOVNotificationRequest) \ .receive(SimpleAckPDU).doc("8.10.3-2-2") \ .send(SubscribeCOVRequest( destination=anet.iut.address, subscriberProcessIdentifier=1, monitoredObjectIdentifier=('analogValue', 1), )).doc("8.10.3-2-1") \ .ignore(UnconfirmedCOVNotificationRequest) \ .receive(SimpleAckPDU).doc("8.10.3-2-2") \ .success() # run the group anet.run() def test_8_10_4(self): """Requests 8 Hour Lifetimes""" if _debug: TestAnalogValue._debug("test_8_10_4") # create a network anet = ApplicationNetwork("test_8_10_4") # add the ability to accept COV notifications to the TD anet.td.add_capability(COVTestClientServices) # tell the TD how to respond to confirmed notifications anet.td.test_ack = True anet.td.test_reject = None anet.td.test_abort = None # add the service capability to the IUT anet.iut.add_capability(ChangeOfValueServices) # make an analog value 
object test_av = AnalogValueObject( objectIdentifier=('analogValue', 1), objectName='av', presentValue=0.0, statusFlags=[0, 0, 0, 0], covIncrement=10.0, ) # add it to the implementation anet.iut.add_object(test_av) # wait for the subscription anet.iut.start_state.doc("8.10.4-1-0") \ .receive(SubscribeCOVRequest).doc("8.10.4-1-1") \ .success() # send the subscription, wait for the ack anet.td.start_state.doc("8.10.4-2-0") \ .send(SubscribeCOVRequest( destination=anet.iut.address, subscriberProcessIdentifier=1, monitoredObjectIdentifier=('analogValue', 1), issueConfirmedNotifications=True, lifetime=28800, )).doc("8.10.4-2-1") \ .receive(SimpleAckPDU).doc("8.10.4-2-2") \ .success() # run the group anet.run() def test_9_10_1_1(self): if _debug: TestAnalogValue._debug("test_9_10_1_1") notification_fail_time = 0.5 # create a network anet = ApplicationNetwork("test_9_10_1_1") # add the ability to accept COV notifications to the TD anet.td.add_capability(COVTestClientServices) # tell the TD how to respond to confirmed notifications anet.td.test_ack = True anet.td.test_reject = None anet.td.test_abort = None # add the service capability to the IUT anet.iut.add_capability(ChangeOfValueServices) # make an analog value object test_av = AnalogValueObject( objectIdentifier=('analogValue', 1), objectName='av', presentValue=0.0, statusFlags=[0, 0, 0, 0], covIncrement=10.0, ) # add it to the implementation anet.iut.add_object(test_av) # wait for the subscription, wait for the notification ack anet.iut.start_state.doc("9.10.1.1-1-0") \ .receive(SubscribeCOVRequest).doc("9.10.1.1-1-1") \ .receive(SimpleAckPDU).doc("9.10.1.1-1-2") \ .timeout(10).doc("9.10.1.1-1-3") \ .success() # test device is quiet wait_for_notification = \ anet.td.start_state.doc("9.10.1.1-2-0") \ .send(SubscribeCOVRequest( destination=anet.iut.address, subscriberProcessIdentifier=1, monitoredObjectIdentifier=('analogValue', 1), issueConfirmedNotifications=True, lifetime=30, )).doc("9.10.1.1-2-1") \ 
            .receive(SimpleAckPDU).doc("9.10.1.1-2-2")

        # after the ack, don't wait too long for the notification
        wait_for_notification \
            .timeout(notification_fail_time).doc("9.10.1.1-2-3").fail()

        # if the notification is received, success
        wait_for_notification \
            .receive(ConfirmedCOVNotificationRequest).doc("9.10.1.1-2-4") \
            .timeout(10).doc("9.10.1.1-2-5") \
            .success()

        # run the group
        anet.run()

    def test_no_traffic(self):
        """Test basic configuration of a network.

        The IUT toggles the present value of an analog value object but,
        with no COV subscription outstanding, the test device must stay
        silent for the whole scenario.
        """
        if _debug: TestAnalogValue._debug("test_no_traffic")

        # create a network
        anet = ApplicationNetwork("test_no_traffic")

        # add the service capability to the IUT
        anet.iut.add_capability(ChangeOfValueServices)

        # make an analog value object
        test_av = AnalogValueObject(
            objectIdentifier=('analogValue', 1),
            objectName='av',
            presentValue=0.0,
            statusFlags=[0, 0, 0, 0],
            covIncrement=10.0,
            )

        # an easy way to change the present value
        write_test_av = lambda v: setattr(test_av, 'presentValue', v)

        # add it to the implementation
        anet.iut.add_object(test_av)

        # make some transitions
        anet.iut.start_state.doc("1-1-0") \
            .call(write_test_av, 100.0).doc("1-1-1") \
            .timeout(1).doc("1-1-2") \
            .call(write_test_av, 0.0).doc("1-1-3") \
            .timeout(1).doc("1-1-4") \
            .success()

        # test device is quiet
        anet.td.start_state.timeout(5).success()

        # run the group
        anet.run()

    def test_8_2_1(self):
        """To verify that the IUT can initiate ConfirmedCOVNotification
        service requests conveying a change of the Present_Value property
        of Analog Input, Analog Output, and Analog Value objects.

        A change smaller than covIncrement (5.0 < 10.0) must produce no
        notification; a change reaching the increment (10.0) must produce
        one confirmed notification that the TD acknowledges.
        """
        if _debug: TestAnalogValue._debug("test_8_2_1")

        # create a network
        anet = ApplicationNetwork("test_8_2_1")

        # add the ability to accept COV notifications to the TD
        anet.td.add_capability(COVTestClientServices)

        # tell the TD how to respond to confirmed notifications
        anet.td.test_ack = True
        anet.td.test_reject = None
        anet.td.test_abort = None

        # add the service capability to the IUT
        anet.iut.add_capability(ChangeOfValueServices)

        # make an analog value object
        test_av = AnalogValueObject(
            objectIdentifier=('analogValue', 1),
            objectName='av',
            presentValue=0.0,
            statusFlags=[0, 0, 0, 0],
            covIncrement=10.0,
            )

        # an easy way to change the present value
        write_test_av = lambda v: setattr(test_av, 'presentValue', v)

        # add it to the implementation
        anet.iut.add_object(test_av)

        # receive the subscription request, wait until the client has
        # received the ack and the 'instant' notification.  Then change the
        # value a little bit and nothing should be sent.  Change it some more
        # and wait for the notification ack.
        # NOTE(review): some .doc() tags below ("2-2-5", "2-2-8") do not
        # follow the "2-1-x" sequence of this state machine -- verify the
        # intended tag numbering; tags are runtime strings, left unchanged.
        anet.iut.start_state.doc("2-1-0") \
            .receive(SubscribeCOVRequest).doc("2-1-1") \
            .receive(SimpleAckPDU).doc("2-1-2") \
            .wait_event("e1").doc("2-1-3") \
            .call(write_test_av, 5.0).doc("2-1-4") \
            .timeout(5).doc("2-2-5") \
            .call(write_test_av, 10.0).doc("2-1-6") \
            .receive(SimpleAckPDU).doc("2-1-7") \
            .timeout(10).doc("2-2-8") \
            .success()

        # send the subscription request, wait for the ack and the 'instant'
        # notification, set the event so the IUT can continue, then wait
        # for the next notification
        anet.td.start_state.doc("2-2-0") \
            .send(SubscribeCOVRequest(
                destination=anet.iut.address,
                subscriberProcessIdentifier=1,
                monitoredObjectIdentifier=('analogValue', 1),
                issueConfirmedNotifications=True,
                lifetime=30,
                )).doc("2-2-1") \
            .receive(SimpleAckPDU).doc("2-2-2") \
            .receive(ConfirmedCOVNotificationRequest).doc("2-2-4") \
            .set_event("e1").doc("2-2-3") \
            .receive(ConfirmedCOVNotificationRequest).doc("2-2-4") \
            .timeout(10).doc("2-2-5") \
            .success()

        # run the group
        anet.run()

    def test_simple_transition_unconfirmed(self):
        """Subscribe with issueConfirmedNotifications=False and verify the
        IUT sends UnconfirmedCOVNotification requests (no ack expected)."""
        if _debug: TestAnalogValue._debug("test_simple_transition_unconfirmed")

        # create a network
        anet = ApplicationNetwork("test_simple_transition_unconfirmed")

        # add the ability to accept COV notifications to the TD
        anet.td.add_capability(COVTestClientServices)

        # tell the TD how to respond to confirmed notifications
        anet.td.test_ack = True
        anet.td.test_reject = None
        anet.td.test_abort = None

        # add the service capability to the IUT
        anet.iut.add_capability(ChangeOfValueServices)

        # make an analog value object
        test_av = AnalogValueObject(
            objectIdentifier=('analogValue', 1),
            objectName='av',
            presentValue=0.0,
            statusFlags=[0, 0, 0, 0],
            covIncrement=10.0,
            )

        # an easy way to change the present value
        write_test_av = lambda v: setattr(test_av, 'presentValue', v)

        # add it to the implementation
        anet.iut.add_object(test_av)

        # receive the subscription request, wait until the client has
        # received the ack and the 'instant' notification.  Then change the
        # value, no ack coming back
        anet.iut.start_state.doc("3-1-0") \
            .receive(SubscribeCOVRequest).doc("3-1-1") \
            .wait_event("e1").doc("3-1-2") \
            .call(write_test_av, 100.0).doc("3-1-3") \
            .timeout(10).doc("3-2-4") \
            .success()

        # subscribe unconfirmed, expect the ack, the 'instant' notification,
        # then the notification triggered by the value change
        anet.td.start_state.doc("3-2-0") \
            .send(SubscribeCOVRequest(
                destination=anet.iut.address,
                subscriberProcessIdentifier=1,
                monitoredObjectIdentifier=('analogValue', 1),
                issueConfirmedNotifications=False,
                lifetime=30,
                )).doc("3-2-1") \
            .receive(SimpleAckPDU).doc("3-2-2") \
            .receive(UnconfirmedCOVNotificationRequest).doc("3-2-3") \
            .set_event("e1").doc("3-2-4") \
            .receive(UnconfirmedCOVNotificationRequest).doc("3-2-5") \
            .timeout(10).doc("3-2-6") \
            .success()

        # run the group
        anet.run()

    def test_changing_status_flags(self):
        """This test changes the status flags of an analog value point to
        verify that the detection picks up other changes, most tests
        just change the present value."""
        if _debug: TestAnalogValue._debug("test_changing_status_flags")

        # create a network
        anet = ApplicationNetwork("test_changing_status_flags")

        # add the ability to accept COV notifications to the TD
        anet.td.add_capability(COVTestClientServices)

        # tell the TD how to respond to confirmed notifications
        anet.td.test_ack = True
        anet.td.test_reject = None
        anet.td.test_abort = None

        # add the service capability to the IUT
        anet.iut.add_capability(ChangeOfValueServices)

        # make an analog value object
        test_av = AnalogValueObject(
            objectIdentifier=('analogValue', 1),
            objectName='av',
            presentValue=0.0,
            statusFlags=[0, 0, 0, 0],
            covIncrement=10.0,
            )

        # an easy way to set the fault status flag (not the present value)
        def test_av_fault():
            if _debug: TestAnalogValue._debug("test_av_fault")
            test_av.statusFlags = [0, 1, 0, 0]

        # add it to the implementation
        anet.iut.add_object(test_av)

        # receive the subscription request, wait until the client has
        # received the ack and the 'instant' notification.  Then change the
        # value, no ack coming back
        anet.iut.start_state.doc("4-1-0") \
            .receive(SubscribeCOVRequest).doc("4-1-1") \
            .wait_event("e1").doc("4-1-2") \
            .call(test_av_fault).doc("4-1-3") \
            .timeout(10).doc("4-2-4") \
            .success()

        # subscribe unconfirmed, then expect the notification triggered by
        # the status-flags change
        anet.td.start_state.doc("4-2-0") \
            .send(SubscribeCOVRequest(
                destination=anet.iut.address,
                subscriberProcessIdentifier=1,
                monitoredObjectIdentifier=('analogValue', 1),
                issueConfirmedNotifications=False,
                lifetime=30,
                )).doc("4-2-1") \
            .receive(SimpleAckPDU).doc("4-2-2") \
            .receive(UnconfirmedCOVNotificationRequest).doc("4-2-3") \
            .set_event("e1").doc("4-2-4") \
            .receive(UnconfirmedCOVNotificationRequest).doc("4-2-5") \
            .timeout(10).doc("4-2-6") \
            .success()

        # run the group
        anet.run()

    def test_changing_properties(self):
        """This test changes the value of multiple properties to verify
        that only one COV notification is sent."""
        if _debug: TestAnalogValue._debug("test_changing_properties")

        # create a network
        anet = ApplicationNetwork("test_changing_properties")

        # add the ability to accept COV notifications to the TD
        anet.td.add_capability(COVTestClientServices)

        # tell the TD how to respond to confirmed notifications
        anet.td.test_ack = True
        anet.td.test_reject = None
        anet.td.test_abort = None

        # add the service capability to the IUT
        anet.iut.add_capability(ChangeOfValueServices)

        # make an analog value object
        test_av = AnalogValueObject(
            objectIdentifier=('analogValue', 1),
            objectName='av',
            presentValue=0.0,
            statusFlags=[0, 0, 0, 0],
            covIncrement=10.0,
            )

        # change both the present value and a status flag in one step;
        # this should trigger only one notification
        def test_av_fault():
            if _debug: TestAnalogValue._debug("test_av_fault")
            test_av.presentValue = 100.0
            test_av.statusFlags = [0, 0, 1, 0]

        # add it to the implementation
        anet.iut.add_object(test_av)

        # receive the subscription request, wait until the client has
        # received the ack and the 'instant' notification.  Then change the
        # value, no ack coming back
        anet.iut.start_state.doc("5-1-0") \
            .receive(SubscribeCOVRequest).doc("5-1-1") \
            .wait_event("e1").doc("5-1-2") \
            .call(test_av_fault).doc("5-1-3") \
            .timeout(10).doc("5-2-4") \
            .success()

        # subscribe unconfirmed, then expect exactly one more notification
        # for the combined property change
        anet.td.start_state.doc("5-2-0") \
            .send(SubscribeCOVRequest(
                destination=anet.iut.address,
                subscriberProcessIdentifier=1,
                monitoredObjectIdentifier=('analogValue', 1),
                issueConfirmedNotifications=False,
                lifetime=30,
                )).doc("5-2-1") \
            .receive(SimpleAckPDU).doc("5-2-2") \
            .receive(UnconfirmedCOVNotificationRequest).doc("5-2-3") \
            .set_event("e1").doc("5-2-4") \
            .receive(UnconfirmedCOVNotificationRequest).doc("5-2-5") \
            .timeout(10).doc("5-2-6") \
            .success()

        # run the group
        anet.run()

    def test_multiple_subscribers(self):
        """This has more than one subscriber for the object.

        Two test devices (td and td2) subscribe to the same analog value;
        both must receive the notification triggered by a single change.
        """
        if _debug: TestAnalogValue._debug("test_multiple_subscribers")

        # create a network
        anet = ApplicationNetwork("test_multiple_subscribers")

        # add the ability to accept COV notifications to the TD
        anet.td.add_capability(COVTestClientServices)

        # tell the TD how to respond to confirmed notifications
        anet.td.test_ack = True
        anet.td.test_reject = None
        anet.td.test_abort = None

        # add the service capability to the IUT
        anet.iut.add_capability(ChangeOfValueServices)

        # make an analog value object
        test_av = AnalogValueObject(
            objectIdentifier=('analogValue', 1),
            objectName='av',
            presentValue=0.0,
            statusFlags=[0, 0, 0, 0],
            covIncrement=10.0,
            )

        # an easy way to change both the present value and status flags
        # which should trigger only one notification
        def test_av_fault():
            if _debug: TestAnalogValue._debug("test_av_fault")
            test_av.presentValue = 100.0
            test_av.statusFlags = [0, 0, 1, 0]

        # add it to the implementation
        anet.iut.add_object(test_av)

        # add another test device object
        anet.td2_device_object = LocalDeviceObject(
            objectName="td2",
            objectIdentifier=('device', 30),
            maxApduLengthAccepted=1024,
            segmentationSupported='noSegmentation',
            vendorIdentifier=999,
            )

        # another test device
        anet.td2 = ApplicationStateMachine(anet.td2_device_object, anet.vlan)
        anet.td2.add_capability(COVTestClientServices)
        anet.append(anet.td2)

        # receive the subscription requests, wait until both clients have
        # received the ack and the 'instant' notification.  Then change the
        # value, no ack coming back
        anet.iut.start_state.doc("6-1-0") \
            .receive(SubscribeCOVRequest, pduSource=anet.td.address).doc("6-1-1") \
            .receive(SubscribeCOVRequest, pduSource=anet.td2.address).doc("6-1-2") \
            .wait_event("e2").doc("6-1-3") \
            .call(test_av_fault).doc("6-1-4") \
            .timeout(10).doc("6-2-5") \
            .success()

        # first test device; send the subscription request, get an ack
        # followed by the 'instant' notification
        anet.td.start_state.doc("6-2-0") \
            .send(SubscribeCOVRequest(
                destination=anet.iut.address,
                subscriberProcessIdentifier=1,
                monitoredObjectIdentifier=('analogValue', 1),
                issueConfirmedNotifications=False,
                lifetime=30,
                )).doc("6-2-1") \
            .receive(SimpleAckPDU).doc("6-2-2") \
            .receive(UnconfirmedCOVNotificationRequest).doc("6-2-3") \
            .set_event("e1").doc("6-2-4") \
            .receive(UnconfirmedCOVNotificationRequest).doc("6-2-5") \
            .timeout(10).doc("6-2-6") \
            .success()

        # same pattern for the other test device; it waits for "e1" so the
        # two subscriptions arrive at the IUT in a deterministic order
        anet.td2.start_state.doc("6-3-0") \
            .wait_event("e1").doc("6-3-1") \
            .send(SubscribeCOVRequest(
                destination=anet.iut.address,
                subscriberProcessIdentifier=1,
                monitoredObjectIdentifier=('analogValue', 1),
                issueConfirmedNotifications=False,
                lifetime=30,
                )).doc("6-3-2") \
            .receive(SimpleAckPDU).doc("6-3-3") \
            .receive(UnconfirmedCOVNotificationRequest).doc("6-3-4") \
            .set_event("e2").doc("6-3-5") \
            .receive(UnconfirmedCOVNotificationRequest).doc("6-3-6") \
            .timeout(10).doc("6-3-7") \
            .success()

        # run the group
        anet.run()
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)

    OpenAPI spec version: v1.8.2

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from pprint import pformat
from six import iteritems
import re


class V1ResourceQuota(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1ResourceQuotaSpec',
        'status': 'V1ResourceQuotaStatus'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
        """
        V1ResourceQuota - a model defined in Swagger
        """

        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None

        # only assign through the property setters when a value was given,
        # so unset attributes stay None and are still emitted by to_dict()
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec
        if status is not None:
            self.status = status

    @property
    def api_version(self):
        """
        Gets the api_version of this V1ResourceQuota.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources

        :return: The api_version of this V1ResourceQuota.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """
        Sets the api_version of this V1ResourceQuota.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources

        :param api_version: The api_version of this V1ResourceQuota.
        :type: str
        """

        self._api_version = api_version

    @property
    def kind(self):
        """
        Gets the kind of this V1ResourceQuota.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds

        :return: The kind of this V1ResourceQuota.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1ResourceQuota.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds

        :param kind: The kind of this V1ResourceQuota.
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """
        Gets the metadata of this V1ResourceQuota.
        Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata

        :return: The metadata of this V1ResourceQuota.
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1ResourceQuota.
        Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata

        :param metadata: The metadata of this V1ResourceQuota.
        :type: V1ObjectMeta
        """

        self._metadata = metadata

    @property
    def spec(self):
        """
        Gets the spec of this V1ResourceQuota.
        Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status

        :return: The spec of this V1ResourceQuota.
        :rtype: V1ResourceQuotaSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """
        Sets the spec of this V1ResourceQuota.
        Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status

        :param spec: The spec of this V1ResourceQuota.
        :type: V1ResourceQuotaSpec
        """

        self._spec = spec

    @property
    def status(self):
        """
        Gets the status of this V1ResourceQuota.
        Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status

        :return: The status of this V1ResourceQuota.
        :rtype: V1ResourceQuotaStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """
        Sets the status of this V1ResourceQuota.
        Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status

        :param status: The status of this V1ResourceQuota.
        :type: V1ResourceQuotaStatus
        """

        self._status = status

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # recursively serialize nested models, lists and dicts of models
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1ResourceQuota):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
from abc import ABCMeta, abstractmethod from six import with_metaclass from os.path import join import numpy as np from MCEq.misc import theta_rad from MCEq.misc import info import mceq_config as config class EarthsAtmosphere(with_metaclass(ABCMeta)): """ Abstract class containing common methods on atmosphere. You have to inherit from this class and implement the virtual method :func:`get_density`. Note: Do not instantiate this class directly. Attributes: thrad (float): current zenith angle :math:`\\theta` in radiants theta_deg (float): current zenith angle :math:`\\theta` in degrees max_X (float): Slant depth at the surface according to the geometry defined in the :mod:`MCEq.geometry` geometry (object): Can be a custom instance of EarthGeometry """ def __init__(self, *args, **kwargs): from MCEq.geometry.geometry import EarthGeometry self.geom = kwargs.pop('geometry', EarthGeometry()) self.thrad = None self.theta_deg = None self._max_den = config.max_density self.max_theta = 90. self.location = None self.season = None @abstractmethod def get_density(self, h_cm): """Abstract method which implementation should return the density in g/cm**3. Args: h_cm (float): height in cm Returns: float: density in g/cm**3 Raises: NotImplementedError: """ raise NotImplementedError("Base class called.") def calculate_density_spline(self, n_steps=2000): """Calculates and stores a spline of :math:`\\rho(X)`. Args: n_steps (int, optional): number of :math:`X` values to use for interpolation Raises: Exception: if :func:`set_theta` was not called before. """ from scipy.integrate import cumtrapz from time import time from scipy.interpolate import UnivariateSpline if self.theta_deg is None: raise Exception('zenith angle not set') else: info( 5, 'Calculating spline of rho(X) for zenith {0:4.1f} degrees.'. 
format(self.theta_deg)) thrad = self.thrad path_length = self.geom.l(thrad) vec_rho_l = np.vectorize( lambda delta_l: self.get_density(self.geom.h(delta_l, thrad))) dl_vec = np.linspace(0, path_length, n_steps) now = time() # Calculate integral for each depth point X_int = cumtrapz(vec_rho_l(dl_vec), dl_vec) # dl_vec = dl_vec[1:] info(5, '.. took {0:1.2f}s'.format(time() - now)) # Save depth value at h_obs self._max_X = X_int[-1] self._max_den = self.get_density(self.geom.h(0, thrad)) # Interpolate with bi-splines without smoothing h_intp = [self.geom.h(dl, thrad) for dl in reversed(dl_vec[1:])] X_intp = [X for X in reversed(X_int[1:])] self._s_h2X = UnivariateSpline(h_intp, np.log(X_intp), k=2, s=0.0) self._s_X2rho = UnivariateSpline(X_int, vec_rho_l(dl_vec), k=2, s=0.0) self._s_lX2h = UnivariateSpline(np.log(X_intp)[::-1], h_intp[::-1], k=2, s=0.0) @property def max_X(self): """Depth at altitude 0.""" if not hasattr(self, '_max_X'): self.set_theta(0) return self._max_X @property def max_den(self): """Density at altitude 0.""" if not hasattr(self, '_max_den'): self.set_theta(0) return self._max_den @property def s_h2X(self): """Spline for conversion from altitude to depth.""" if not hasattr(self, '_s_h2X'): self.set_theta(0) return self._s_h2X @property def s_X2rho(self): """Spline for conversion from depth to density.""" if not hasattr(self, '_s_X2rho'): self.set_theta(0) return self._s_X2rho @property def s_lX2h(self): """Spline for conversion from depth to altitude.""" if not hasattr(self, '_s_lX2h'): self.set_theta(0) return self._s_lX2h def set_theta(self, theta_deg, force_spline_calc=False): """Configures geometry and initiates spline calculation for :math:`\\rho(X)`. If the option 'use_atm_cache' is enabled in the config, the function will check, if a corresponding spline is available in the cache and use it. 
Otherwise it will call :func:`calculate_density_spline`, make the function :func:`r_X2rho` available to the core code and store the spline in the cache. Args: theta_deg (float): zenith angle :math:`\\theta` at detector force_spline_calc (bool): forces (re-)calculation of the spline for each call """ if theta_deg < 0. or theta_deg > self.max_theta: raise Exception('Zenith angle not in allowed range.') self.thrad = theta_rad(theta_deg) self.theta_deg = theta_deg self.calculate_density_spline() def r_X2rho(self, X): """Returns the inverse density :math:`\\frac{1}{\\rho}(X)`. The spline `s_X2rho` is used, which was calculated or retrieved from cache during the :func:`set_theta` call. Args: X (float): slant depth in g/cm**2 Returns: float: :math:`1/\\rho` in cm**3/g """ return 1. / self.s_X2rho(X) def h2X(self, h): """Returns the depth along path as function of height above surface. The spline `s_X2rho` is used, which was calculated or retrieved from cache during the :func:`set_theta` call. Args: h (float): vertical height above surface in cm Returns: float: X slant depth in g/cm**2 """ return np.exp(self.s_h2X(h)) def X2h(self, X): """Returns the height above surface as a function of slant depth for currently selected zenith angle. The spline `s_lX2h` is used, which was calculated or retrieved from cache during the :func:`set_theta` call. Args: X (float): slant depth in g/cm**2 Returns: float h: height above surface in cm """ return self.s_lX2h(np.log(X)) def X2rho(self, X): """Returns the density :math:`\\rho(X)`. The spline `s_X2rho` is used, which was calculated or retrieved from cache during the :func:`set_theta` call. Args: X (float): slant depth in g/cm**2 Returns: float: :math:`\\rho` in cm**3/g """ return self.s_X2rho(X) def moliere_air(self, h_cm): """Returns the Moliere unit of air for US standard atmosphere. """ return 9.3 / (self.get_density(h_cm) * 100.) 
def nref_rel_air(self, h_cm): """Returns the refractive index - 1 in air (density parametrization as in CORSIKA). """ return 0.000283 * self.get_density(h_cm) / self.get_density(0) def gamma_cherenkov_air(self, h_cm): """Returns the Lorentz factor gamma of Cherenkov threshold in air (MeV). """ nrel = self.nref_rel_air(h_cm) return (1. + nrel) / np.sqrt(2. * nrel + nrel**2) def theta_cherenkov_air(self, h_cm): """Returns the Cherenkov angle in air (degrees). """ return np.arccos(1. / (1. + self.nref_rel_air(h_cm))) * 180. / np.pi class CorsikaAtmosphere(EarthsAtmosphere): """Class, holding the parameters of a Linsley type parameterization similar to the Air-Shower Monte Carlo `CORSIKA <https://web.ikp.kit.edu/corsika/>`_. The parameters pre-defined parameters are taken from the CORSIKA manual. If new sets of parameters are added to :func:`init_parameters`, the array _thickl can be calculated using :func:`calc_thickl` . Attributes: _atm_param (numpy.array): (5x5) Stores 5 atmospheric parameters _aatm, _batm, _catm, _thickl, _hlay for each of the 5 layers Args: location (str): see :func:`init_parameters` season (str,optional): see :func:`init_parameters` """ _atm_param = None def __init__(self, location, season=None): cka_atmospheres = [ ("USStd", None), ("BK_USStd", None), ("Karlsruhe", None), ("ANTARES/KM3NeT-ORCA", 'Summer'), ("ANTARES/KM3NeT-ORCA", 'Winter'), ("KM3NeT-ARCA", 'Summer'), ("KM3NeT-ARCA", 'Winter'), ("KM3NeT",None), ('SouthPole','December'), ('PL_SouthPole','January'), ('PL_SouthPole','August'), ] assert (location, season) in cka_atmospheres, \ '{0}/{1} not available for CorsikaAtmsophere'.format( location, season ) self.init_parameters(location, season) import MCEq.geometry.corsikaatm.corsikaatm as corsika_acc self.corsika_acc = corsika_acc EarthsAtmosphere.__init__(self) def init_parameters(self, location, season): """Initializes :attr:`_atm_param`. Parameters from ANTARES/KM3NET are based on the work of T. 
Heid (`see this issue <https://github.com/afedynitch/MCEq/issues/12>`_) +---------------------+-------------------+------------------------------+ | location | CORSIKA Table | Description/season | +=====================+===================+==============================+ | "USStd" | 23 | US Standard atmosphere | +---------------------+-------------------+------------------------------+ | "BK_USStd" | 37 | Bianca Keilhauer's USStd | +---------------------+-------------------+------------------------------+ | "Karlsruhe" | 24 | AT115 / Karlsruhe | +---------------------+-------------------+------------------------------+ | "SouthPole" | 26 and 28 | MSIS-90-E for Dec and June | +---------------------+-------------------+------------------------------+ |"PL_SouthPole" | 29 and 30 | P. Lipari's Jan and Aug | +---------------------+-------------------+------------------------------+ |"ANTARES/KM3NeT-ORCA"| NA | PhD T. Heid | +---------------------+-------------------+------------------------------+ | "KM3NeT-ARCA" | NA | PhD T. 
Heid | +---------------------+-------------------+------------------------------+ Args: location (str): see table season (str, optional): choice of season for supported locations Raises: Exception: if parameter set not available """ _aatm, _batm, _catm, _thickl, _hlay = None, None, None, None, None if location == "USStd": _aatm = np.array([-186.5562, -94.919, 0.61289, 0.0, 0.01128292]) _batm = np.array([1222.6562, 1144.9069, 1305.5948, 540.1778, 1.0]) _catm = np.array([994186.38, 878153.55, 636143.04, 772170., 1.0e9]) _thickl = np.array( [1036.102549, 631.100309, 271.700230, 3.039494, 0.001280]) _hlay = np.array([0, 4.0e5, 1.0e6, 4.0e6, 1.0e7]) elif location == "BK_USStd": _aatm = np.array([ -149.801663, -57.932486, 0.63631894, 4.3545369e-4, 0.01128292 ]) _batm = np.array([1183.6071, 1143.0425, 1322.9748, 655.69307, 1.0]) _catm = np.array( [954248.34, 800005.34, 629568.93, 737521.77, 1.0e9]) _thickl = np.array( [1033.804941, 418.557770, 216.981635, 4.344861, 0.001280]) _hlay = np.array([0.0, 7.0e5, 1.14e6, 3.7e6, 1.0e7]) elif location == "Karlsruhe": _aatm = np.array( [-118.1277, -154.258, 0.4191499, 5.4094056e-4, 0.01128292]) _batm = np.array([1173.9861, 1205.7625, 1386.7807, 555.8935, 1.0]) _catm = np.array([919546., 963267.92, 614315., 739059.6, 1.0e9]) _thickl = np.array( [1055.858707, 641.755364, 272.720974, 2.480633, 0.001280]) _hlay = np.array([0.0, 4.0e5, 1.0e6, 4.0e6, 1.0e7]) elif location == "KM3NeT": # averaged over detector and season _aatm = np.array([-141.31449999999998, -8.256029999999999, 0.6132505, -0.025998975, 0.4024275]) _batm = np.array([1153.0349999999999, 1263.3325, 1257.0724999999998, 404.85974999999996, 1.0]) _catm = np.array([967990.75, 668591.75, 636790.0, 814070.75, 21426175.0]) _thickl = np.array([1011.8521512499999, 275.84507575000003, 51.0230705, 2.983134, 0.21927724999999998]) _hlay = np.array([0.0, 993750.0, 2081250.0, 4150000.0, 6877500.0]) elif location == "ANTARES/KM3NeT-ORCA": if season == 'Summer': _aatm = np.array([-158.85, 
-5.38682, 0.889893, -0.0286665, 0.50035]) _batm = np.array([1145.62, 1176.79, 1248.92, 415.543, 1.0]) _catm = np.array([998469.0, 677398.0, 636790.0, 823489.0, 16090500.0]) _thickl = np.array([986.951713, 306.4668, 40.546793, 4.288721, 0.277182]) _hlay = np.array([0, 9.0e5, 22.0e5, 38.0e5, 68.2e5]) elif season == 'Winter': _aatm = np.array([-132.16, -2.4787, 0.298031, -0.0220264, 0.348021]) _batm = np.array([1120.45, 1203.97, 1163.28, 360.027, 1.0]) _catm = np.array([933697.0, 643957.0, 636790.0, 804486.0, 23109000.0]) _thickl = np.array([988.431172, 273.033464, 37.185105, 1.162987, 0.192998]) _hlay = np.array([0, 9.5e5, 22.0e5, 47.0e5, 68.2e5]) elif location == "KM3NeT-ARCA": if season == 'Summer': _aatm = np.array([-157.857, -28.7524, 0.790275, -0.0286999, 0.481114]) _batm = np.array([1190.44, 1171.0, 1344.78, 445.357, 1.0]) _catm = np.array([1006100.0, 758614.0, 636790.0, 817384.0, 16886800.0]) _thickl = np.array([1032.679434, 328.978681, 80.601135, 4.420745, 0.264112]) _hlay = np.array([0, 9.0e5, 18.0e5, 38.0e5, 68.2e5]) elif season == 'Winter': _aatm = np.array([-116.391, 3.5938, 0.474803, -0.0246031, 0.280225]) _batm = np.array([1155.63, 1501.57, 1271.31, 398.512, 1.0]) _catm = np.array([933697.0, 594398.0, 636790.0, 810924.0, 29618400.0]) _thickl = np.array([1039.346286, 194.901358, 45.759249, 2.060083, 0.142817]) _hlay = np.array([0, 12.25e5, 21.25e5, 43.0e5, 70.5e5]) elif location == 'SouthPole': if season == 'December': _aatm = np.array( [-128.601, -39.5548, 1.13088, -0.00264960, 0.00192534]) _batm = np.array([1139.99, 1073.82, 1052.96, 492.503, 1.0]) _catm = np.array( [861913., 744955., 675928., 829627., 5.8587010e9]) _thickl = np.array( [1011.398804, 588.128367, 240.955360, 3.964546, 0.000218]) _hlay = np.array([0.0, 4.0e5, 1.0e6, 4.0e6, 1.0e7]) elif season == "June": _aatm = np.array( [-163.331, -65.3713, 0.402903, -0.000479198, 0.00188667]) _batm = np.array([1183.70, 1108.06, 1424.02, 207.595, 1.0]) _catm = np.array( [875221., 753213., 545846., 
793043., 5.9787908e9]) _thickl = np.array( [1020.370363, 586.143464, 228.374393, 1.338258, 0.000214]) _hlay = np.array([0.0, 4.0e5, 1.0e6, 4.0e6, 1.0e7]) else: raise Exception('CorsikaAtmosphere(): Season "' + season + '" not parameterized for location SouthPole.') elif location == 'PL_SouthPole': if season == 'January': _aatm = np.array( [-113.139, -7930635, -54.3888, -0.0, 0.00421033]) _batm = np.array([1133.10, 1101.20, 1085.00, 1098.00, 1.0]) _catm = np.array( [861730., 826340., 790950., 682800., 2.6798156e9]) _thickl = np.array([ 1019.966898, 718.071682, 498.659703, 340.222344, 0.000478 ]) _hlay = np.array([0.0, 2.67e5, 5.33e5, 8.0e5, 1.0e7]) elif season == "August": _aatm = np.array( [-59.0293, -21.5794, -7.14839, 0.0, 0.000190175]) _batm = np.array([1079.0, 1071.90, 1182.0, 1647.1, 1.0]) _catm = np.array( [764170., 699910., 635650., 551010., 59.329575e9]) _thickl = np.array( [1019.946057, 391.739652, 138.023515, 43.687992, 0.000022]) _hlay = np.array([0.0, 6.67e5, 13.33e5, 2.0e6, 1.0e7]) else: raise Exception('CorsikaAtmosphere(): Season "' + season + '" not parameterized for location SouthPole.') else: raise Exception("CorsikaAtmosphere:init_parameters(): Location " + str(location) + " not parameterized.") self._atm_param = np.array([_aatm, _batm, _catm, _thickl, _hlay]) self.location, self.season = location, season # Clear cached theta value to force spline recalculation self.theta_deg = None def depth2height(self, x_v): """Converts column/vertical depth to height. 
Args: x_v (float): column depth :math:`X_v` in g/cm**2 Returns: float: height in cm """ _aatm, _batm, _catm, _thickl, _hlay = self._atm_param if x_v >= _thickl[1]: height = _catm[0] * np.log(_batm[0] / (x_v - _aatm[0])) elif x_v >= _thickl[2]: height = _catm[1] * np.log(_batm[1] / (x_v - _aatm[1])) elif x_v >= _thickl[3]: height = _catm[2] * np.log(_batm[2] / (x_v - _aatm[2])) elif x_v >= _thickl[4]: height = _catm[3] * np.log(_batm[3] / (x_v - _aatm[3])) else: height = (_aatm[4] - x_v) * _catm[4] return height def get_density(self, h_cm): """ Returns the density of air in g/cm**3. Uses the optimized module function :func:`corsika_get_density_jit`. Args: h_cm (float): height in cm Returns: float: density :math:`\\rho(h_{cm})` in g/cm**3 """ return self.corsika_acc.corsika_get_density(h_cm, *self._atm_param) # return corsika_get_density_jit(h_cm, self._atm_param) def get_mass_overburden(self, h_cm): """ Returns the mass overburden in atmosphere in g/cm**2. Uses the optimized module function :func:`corsika_get_m_overburden_jit` Args: h_cm (float): height in cm Returns: float: column depth :math:`T(h_{cm})` in g/cm**2 """ return self.corsika_acc.corsika_get_m_overburden(h_cm, *self._atm_param) # return corsika_get_m_overburden_jit(h_cm, self._atm_param) def rho_inv(self, X, cos_theta): """Returns reciprocal density in cm**3/g using planar approximation. This function uses the optimized function :func:`planar_rho_inv_jit` Args: h_cm (float): height in cm Returns: float: :math:`\\frac{1}{\\rho}(X,\\cos{\\theta})` cm**3/g """ return self.corsika_acc.planar_rho_inv(X, cos_theta, *self._atm_param) # return planar_rho_inv_jit(X, cos_theta, self._atm_param) def calc_thickl(self): """Calculates thickness layers for :func:`depth2height` The analytical inversion of the CORSIKA parameterization relies on the knowledge about the depth :math:`X`, where trasitions between layers/exponentials occur. 
Example: Create a new set of parameters in :func:`init_parameters` inserting arbitrary values in the _thikl array:: $ cor_atm = CorsikaAtmosphere(new_location, new_season) $ cor_atm.calc_thickl() Replace _thickl values with printout. """ from scipy.integrate import quad thickl = [] for h in self._atm_param[4]: thickl.append('{0:4.6f}'.format( quad(self.get_density, h, 112.8e5, epsrel=1e-4)[0])) info(5, '_thickl = np.array([' + ', '.join(thickl) + '])') return thickl class IsothermalAtmosphere(EarthsAtmosphere): """Isothermal model of the atmosphere. This model is widely used in semi-analytical calculations. The isothermal approximation is valid in a certain range of altitudes and usually one adjust the parameters to match a more realistic density profile at altitudes between 10 - 30 km, where the high energy muon production rate peaks. Such parametrizations are given in the book "Cosmic Rays and Particle Physics", Gaisser, Engel and Resconi (2016). The default values are from M. Thunman, G. Ingelman, and P. Gondolo, Astropart. Physics 5, 309 (1996). Args: location (str): no effect season (str): no effect hiso_km (float): isothermal scale height in km X0 (float): Ground level overburden """ def __init__(self, location, season, hiso_km=6.3, X0=1300.): self.hiso_cm = hiso_km * 1e5 self.X0 = X0 self.location = location self.season = season EarthsAtmosphere.__init__(self) def get_density(self, h_cm): """ Returns the density of air in g/cm**3. Args: h_cm (float): height in cm Returns: float: density :math:`\\rho(h_{cm})` in g/cm**3 """ return self.X0 / self.hiso_cm * np.exp(-h_cm / self.hiso_cm) def get_mass_overburden(self, h_cm): """ Returns the mass overburden in atmosphere in g/cm**2. Args: h_cm (float): height in cm Returns: float: column depth :math:`T(h_{cm})` in g/cm**2 """ return self.X0 * np.exp(-h_cm / self.hiso_cm) class MSIS00Atmosphere(EarthsAtmosphere): """Wrapper class for a python interface to the NRLMSISE-00 model. 
    `NRLMSISE-00 <http://ccmc.gsfc.nasa.gov/modelweb/atmos/nrlmsise00.html>`_
    is an empirical model of the Earth's atmosphere. It is available as
    a FORTRAN 77 code or as a verson traslated into
    `C by Dominik Borodowski <http://www.brodo.de/english/pub/nrlmsise/>`_.
    Here a PYTHON wrapper has been used.

    Attributes:
      _msis : NRLMSISE-00 python wrapper object handler

    Args:
      location (str): see :func:`init_parameters`
      season (str,optional): see :func:`init_parameters`
    """

    def __init__(self, location, season=None, doy=None,
                 use_loc_altitudes=False):
        from MCEq.geometry.nrlmsise00_mceq import cNRLMSISE00
        # Locations whose coordinates are known to the MSIS wrapper.
        msis_atmospheres = [
            'SouthPole', 'Karlsruhe', 'Geneva', 'Tokyo', 'SanGrasso',
            'TelAviv', 'KSC', 'SoudanMine', 'Tsukuba', 'LynnLake',
            'PeaceRiver', 'FtSumner'
        ]
        assert location in msis_atmospheres, \
            '{0} not available for MSIS00Atmosphere'.format(location)

        self._msis = cNRLMSISE00()
        self.init_parameters(location, season, doy, use_loc_altitudes)
        EarthsAtmosphere.__init__(self)

    def init_parameters(self, location, season, doy, use_loc_altitudes):
        """Sets location and season in :class:`NRLMSISE-00`.

        Translates location and season into day of year
        and geo coordinates.

        Args:
          location (str): Supported are "SouthPole" and "Karlsruhe"
          season (str): months of the year: January, February, etc.
          use_loc_altitudes (bool): If to use default altitudes from location
        """
        self._msis.set_location(location)
        # Either a month name or an explicit day of year selects the date.
        if season is not None:
            self._msis.set_season(season)
        else:
            self._msis.set_doy(doy)
        self.location, self.season = location, season
        # Clear cached value to force spline recalculation
        self.theta_deg = None
        if use_loc_altitudes:
            info(0, 'Using loc altitude', self._msis.alt_surface, 'cm')
            self.geom.h_obs = self._msis.alt_surface

    def get_density(self, h_cm):
        """ Returns the density of air in g/cm**3.

        Wraps around ctypes calls to the NRLMSISE-00 C library.

        Args:
          h_cm (float): height in cm

        Returns:
          float: density :math:`\\rho(h_{cm})` in g/cm**3
        """
        return self._msis.get_density(h_cm)

    def set_location(self, location):
        """ Changes MSIS location by strings defined in _msis_wrapper.

        Args:
          location (str): location as defined in :class:`NRLMSISE-00.`
        """
        self._msis.set_location(location)

    def set_season(self, month):
        """ Changes MSIS location by month strings defined in _msis_wrapper.

        Args:
          location (str): month as defined in :class:`NRLMSISE-00.`
        """
        self._msis.set_season(month)

    def set_doy(self, day_of_year):
        """ Changes MSIS season by day of year.

        Args:
          day_of_year (int): 1. Jan.=0, 1.Feb=32
        """
        self._msis.set_doy(day_of_year)

    def get_temperature(self, h_cm):
        """ Returns the temperature of air in K.

        Wraps around ctypes calls to the NRLMSISE-00 C library.

        Args:
          h_cm (float): height in cm

        Returns:
          float: density :math:`T(h_{cm})` in K
        """
        return self._msis.get_temperature(h_cm)


class AIRSAtmosphere(EarthsAtmosphere):
    """Interpolation class for tabulated atmospheres.

    This class is intended to read preprocessed AIRS Satellite data.

    Args:
      location (str): see :func:`init_parameters`
      season (str,optional): see :func:`init_parameters`
    """

    def __init__(self, location, season, extrapolate=True, *args, **kwargs):
        if location != 'SouthPole':
            raise Exception(self.__class__.__name__ +
                            "(): Only South Pole location supported. " +
                            location)

        self.extrapolate = extrapolate

        # Day of year of the first day of each month (non-leap year).
        self.month2doy = {
            'January': 1, 'February': 32, 'March': 60, 'April': 91,
            'May': 121, 'June': 152, 'July': 182, 'August': 213,
            'September': 244, 'October': 274, 'November': 305,
            'December': 335
        }

        self.season = season

        self.init_parameters(location, **kwargs)
        EarthsAtmosphere.__init__(self)

    def init_parameters(self, location, **kwargs):
        """Loads tables and prepares interpolation.
        Args:
          location (str): supported is only "SouthPole"
          doy (int): Day Of Year
        """
        # from time import strptime
        from matplotlib.dates import datestr2num, num2date
        from os import path

        def bytespdate2num(b):
            # np.loadtxt hands bytes to converters; decode before parsing.
            return datestr2num(b.decode('utf-8'))

        # NOTE(review): `join` is not imported in this method (only
        # `os.path`); it is presumably imported earlier in the module --
        # confirm, otherwise this raises NameError.
        data_path = (join(
            path.expanduser('~'),
            'OneDrive/Dokumente/projects/atmospheric_variations/'))
        if 'table_path' in kwargs:
            data_path = kwargs['table_path']

        files = [('dens', 'airs_amsu_dens_180_daily.txt'),
                 ('temp', 'airs_amsu_temp_180_daily.txt'),
                 ('alti', 'airs_amsu_alti_180_daily.txt')]
        data_collection = {}
        # limit SouthPole pressure to <= 600
        min_press_idx = 4
        # Indices of 1 June 2010 and 1 June 2011 (IceCube IC79 run bounds).
        IC79_idx_1 = None
        IC79_idx_2 = None
        for d_key, fname in files:
            fname = data_path + 'tables/' + fname
            # tabf = open(fname).read()
            tab = np.loadtxt(fname,
                             converters={0: bytespdate2num},
                             usecols=[0] + list(range(2, 27)))
            # with open(fname, 'r') as f:
            #     comline = f.readline()
            # p_levels = [
            #     float(s.strip()) for s in comline.split(' ')[3:] if s != ''
            # ][min_press_idx:]
            dates = num2date(tab[:, 0])
            for di, date in enumerate(dates):
                if date.month == 6 and date.day == 1:
                    if date.year == 2010:
                        IC79_idx_1 = di
                    elif date.year == 2011:
                        IC79_idx_2 = di
            surf_val = tab[:, 1]
            cols = tab[:, min_press_idx + 2:]
            data_collection[d_key] = (dates, surf_val, cols)

        # Per-day interpolation tables keyed by (year, day-of-year).
        self.interp_tab_d = {}
        self.interp_tab_t = {}
        self.dates = {}
        dates = data_collection['alti'][0]
        msis = MSIS00Atmosphere(location, 'January')
        for didx, date in enumerate(dates):
            h_vec = np.array(data_collection['alti'][2][didx, :] * 1e2)
            d_vec = np.array(data_collection['dens'][2][didx, :])
            t_vec = np.array(data_collection['temp'][2][didx, :])

            if self.extrapolate:
                # Extrapolate using msis
                h_extra = np.linspace(h_vec[-1], self.geom.h_atm * 1e2, 250)
                msis._msis.set_doy(self._get_y_doy(date)[1] - 1)
                msis_extra_d = np.array(
                    [msis.get_density(h) for h in h_extra])
                msis_extra_t = np.array(
                    [msis.get_temperature(h) for h in h_extra])

                # Interpolate last few altitude bins
                # (exponential weights blend AIRS data into MSIS values so
                # the two profiles meet smoothly at the top of the table).
                ninterp = 5
                for ni in range(ninterp):
                    cl = (1 - np.exp(-ninterp + ni + 1))
                    ch = (1 - np.exp(-ni))
                    norm = 1. / (cl + ch)
                    d_vec[-ni - 1] = (
                        d_vec[-ni - 1] * cl * norm +
                        msis.get_density(h_vec[-ni - 1]) * ch * norm)
                    t_vec[-ni - 1] = (
                        t_vec[-ni - 1] * cl * norm +
                        msis.get_temperature(h_vec[-ni - 1]) * ch * norm)

                # Merge the two datasets
                h_vec = np.hstack([h_vec[:-1], h_extra])
                d_vec = np.hstack([d_vec[:-1], msis_extra_d])
                t_vec = np.hstack([t_vec[:-1], msis_extra_t])

            self.interp_tab_d[self._get_y_doy(date)] = (h_vec, d_vec)
            self.interp_tab_t[self._get_y_doy(date)] = (h_vec, t_vec)
            self.dates[self._get_y_doy(date)] = date

        self.IC79_start = self._get_y_doy(dates[IC79_idx_1])
        self.IC79_end = self._get_y_doy(dates[IC79_idx_2])
        self.IC79_days = (dates[IC79_idx_2] - dates[IC79_idx_1]).days
        self.location = location
        if self.season is None:
            self.set_IC79_day(0)
        else:
            self.set_season(self.season)
        # Clear cached value to force spline recalculation
        self.theta_deg = None

    def set_date(self, year, doy):
        # Select the tabulated profile for a calendar day.
        self.h, self.dens = self.interp_tab_d[(year, doy)]
        _, self.temp = self.interp_tab_t[(year, doy)]
        self.date = self.dates[(year, doy)]
        # Compatibility with caching
        self.season = self.date

    def _set_doy(self, doy, year=2010):
        # Internal selector; does NOT update self.season (see set_season).
        self.h, self.dens = self.interp_tab_d[(year, doy)]
        _, self.temp = self.interp_tab_t[(year, doy)]
        self.date = self.dates[(year, doy)]

    def set_season(self, month):
        # Maps a month name to its first day of year (of 2010).
        self.season = month
        self._set_doy(self.month2doy[month])
        self.season = month

    def set_IC79_day(self, IC79_day):
        # Select a day counted from the start of the IceCube IC79 run.
        import datetime
        if IC79_day > self.IC79_days:
            raise Exception(self.__class__.__name__ +
                            "::set_IC79_day(): IC79_day above range.")
        target_day = self._get_y_doy(self.dates[self.IC79_start] +
                                     datetime.timedelta(days=IC79_day))
        info(2, 'setting IC79_day', IC79_day)
        self.h, self.dens = self.interp_tab_d[target_day]
        _, self.temp = self.interp_tab_t[target_day]
        self.date = self.dates[target_day]
        # Compatibility with caching
        self.season = self.date

    def _get_y_doy(self, date):
        # (year, day-of-year) tuple used as table key.
        return date.timetuple().tm_year, date.timetuple().tm_yday

    def get_density(self, h_cm):
        """ Returns the density of air in g/cm**3.

        Interpolates table at requested value for previously set
        year and day of year (doy).

        Args:
          h_cm (float): height in cm

        Returns:
          float: density :math:`\\rho(h_{cm})` in g/cm**3
        """
        # Interpolate in log-density for smoothness; above the table NaN.
        ret = np.exp(np.interp(h_cm, self.h, np.log(self.dens)))
        try:
            ret[h_cm > self.h[-1]] = np.nan
        except TypeError:
            # h_cm was a scalar, not an array.
            if h_cm > self.h[-1]:
                return np.nan
        return ret

    def get_temperature(self, h_cm):
        """ Returns the temperature in K.

        Interpolates table at requested value for previously set
        year and day of year (doy).

        Args:
          h_cm (float): height in cm

        Returns:
          float: temperature :math:`T(h_{cm})` in K
        """
        ret = np.exp(np.interp(h_cm, self.h, np.log(self.temp)))
        try:
            ret[h_cm > self.h[-1]] = np.nan
        except TypeError:
            if h_cm > self.h[-1]:
                return np.nan
        return ret


class MSIS00IceCubeCentered(MSIS00Atmosphere):
    """Extension of :class:`MSIS00Atmosphere` which couples the latitude
    setting with the zenith angle of the detector.

    Args:
      location (str): see :func:`init_parameters`
      season (str,optional): see :func:`init_parameters`
    """

    def __init__(self, location, season):
        if location != 'SouthPole':
            info(2, 'location forced to the South Pole')
            location = 'SouthPole'

        MSIS00Atmosphere.__init__(self, location, season)
        # Allow for upgoing zenith angles
        self.max_theta = 180.

    def latitude(self, det_zenith_deg):
        """ Returns the geographic latitude of the shower impact point.

        Assumes a spherical earth. The detector is 1948m under the
        surface.

        Credits: geometry fomulae by Jakob van Santen, DESY Zeuthen.

        Args:
          det_zenith_deg (float): zenith angle at detector in degrees

        Returns:
          float: latitude of the impact point in degrees
        """
        r = self.geom.r_E
        d = 1948  # m
        theta_rad = det_zenith_deg / 180. * np.pi

        # Distance along the shower axis from the detector to the surface.
        x = (np.sqrt(2. * r * d + ((r - d) * np.cos(theta_rad))**2 - d**2) -
             (r - d) * np.cos(theta_rad))

        # South Pole is -90 deg; the impact point is offset by the angle
        # subtended at the Earth's center.
        return -90. + np.arctan2(
            x * np.sin(theta_rad),
            r - d + x * np.cos(theta_rad)) / np.pi * 180.
    def set_theta(self, theta_deg, force_spline_calc=True):
        # Move the MSIS location to the shower impact latitude before
        # delegating to the base-class zenith handling.
        self._msis.set_location_coord(longitude=0.,
                                      latitude=self.latitude(theta_deg))
        info(1, 'latitude = {0:5.2f} for zenith angle = {1:5.2f}'.format(
            self.latitude(theta_deg), theta_deg))
        if theta_deg > 90.:
            # Upgoing directions are mapped to the mirrored downgoing angle.
            info(1, 'theta = {0:5.2f} below horizon. using theta = {1:5.2f}'.
                 format(theta_deg, 180. - theta_deg))
            theta_deg = 180. - theta_deg
        MSIS00Atmosphere.set_theta(self,
                                   theta_deg,
                                   force_spline_calc=force_spline_calc)


class GeneralizedTarget(object):
    """This class provides a way to run MCEq on piece-wise constant
    one-dimenional density profiles.

    The default values for the average density are taken from config
    file variables `len_target`, `env_density` and `env_name`. The
    density profile has to be built by calling subsequently
    :func:`add_material`. The current composition of the target can be
    checked with :func:`draw_materials` or :func:`print_table`.

    Note:
      If the target is not air or hydrogen, the result is approximate,
      since seconray particle yields are provided for nucleon-air or
      proton-proton collisions. Depending on this choice one has to
      adjust the nuclear mass in :mod:`mceq_config`.

    Args:
      len_target (float): total length of the target in meters
      env_density (float): density of the default material in g/cm**3
      env_name (str): title for this environment
    """

    def __init__(
            self,
            len_target=config.len_target * 1e2,  # cm
            env_density=config.env_density,  # g/cm3
            env_name=config.env_name):
        self.len_target = len_target
        self.env_density = env_density
        self.env_name = env_name
        self.reset()

    @property
    def max_den(self):
        # Maximal density among the configured materials (g/cm**3).
        return self._max_den

    def reset(self):
        """Resets material list to defaults."""
        # Each entry: [start_cm, end_cm, density, name].
        self.mat_list = [[
            0., self.len_target, self.env_density, self.env_name
        ]]
        self._update_variables()

    def _update_variables(self):
        """Updates internal variables.
        Not needed to call by user."""
        # Unzip mat_list columns; the name column ([:-1]) is dropped.
        self.start_bounds, self.end_bounds, \
            self.densities = list(zip(*self.mat_list))[:-1]
        self.densities = np.array(self.densities)
        self.start_bounds = np.array(self.start_bounds)
        self.end_bounds = np.array(self.end_bounds)
        self._max_den = np.max(self.densities)
        self._integrate()

    def set_length(self, new_length_cm):
        """Updates the total length of the target.
        Usually the length is set
        """
        if new_length_cm < self.mat_list[-1][0]:
            raise Exception(
                "GeneralizedTarget::set_length(): " +
                "can not set length below lower boundary of last " +
                "material.")
        self.len_target = new_length_cm
        self.mat_list[-1][1] = new_length_cm
        self._update_variables()

    def add_material(self, start_position_cm, density, name):
        """Adds one additional material to a composite target.

        Args:
          start_position_cm (float): position where the material starts
                                     counted from target origin l|X = 0 in cm
          density (float): density of material in g/cm**3
          name (str): any user defined name

        Raises:
          Exception: If requested start_position_cm is not properly defined.
        """
        if start_position_cm < 0. or start_position_cm > self.len_target:
            raise Exception("GeneralizedTarget::add_material(): " +
                            "distance exceeds target dimensions.")
        elif (start_position_cm == self.mat_list[-1][0]
              and self.mat_list[-1][-1] == self.env_name):
            # Same start as the default environment: replace it outright.
            self.mat_list[-1] = [
                start_position_cm, self.len_target, density, name
            ]
        elif start_position_cm <= self.mat_list[-1][0]:
            raise Exception("GeneralizedTarget::add_material(): " +
                            "start_position_cm is ahead of previous material.")
        else:
            # Truncate the previous material and append the new one.
            self.mat_list[-1][1] = start_position_cm
            self.mat_list.append(
                [start_position_cm, self.len_target, density, name])

        info(2, ("{0}::add_material(): Material '{1}' added. " +
                 "location on path {2} to {3} m").format(
                     self.__class__.__name__, name, self.mat_list[-1][0],
                     self.mat_list[-1][1]))
        self._update_variables()

    def set_theta(self, *args):
        """This method is not defined for the generalized target. The purpose
        is to catch usage errors.

        Raises:
          NotImplementedError: always
        """
        # NOTE(review): string concatenation below yields
        # "Methodnot defined..." -- a missing space between the parts.
        raise NotImplementedError('GeneralizedTarget::set_theta(): Method' +
                                  'not defined for this target class.')

    def _integrate(self):
        """Walks through material list and computes the depth along the
        position (path).

        Computes the spline for the position-depth relation
        and determines the maximum depth for the material selection.

        Method does not need to be called by the user, instead the class
        calls it when necessary.
        """
        from scipy.interpolate import UnivariateSpline
        self.density_depth = None
        self.knots = [0.]
        self.X_int = [0.]

        # Depth is piece-wise linear in position: X += rho * segment length.
        for start, end, density, _ in self.mat_list:
            self.knots.append(end)
            self.X_int.append(density * (end - start) + self.X_int[-1])

        # k=1, s=0: exact linear interpolation through the knots.
        self._s_X2h = UnivariateSpline(self.X_int, self.knots, k=1, s=0.)
        self._s_h2X = UnivariateSpline(self.knots, self.X_int, k=1, s=0.)
        self._max_X = self.X_int[-1]

    @property
    def s_X2h(self):
        """Spline for depth at distance."""
        if not hasattr(self, '_s_X2h'):
            self._integrate()
        return self._s_X2h

    @property
    def s_h2X(self):
        """Spline for distance at depth."""
        if not hasattr(self, '_s_h2X'):
            self._integrate()
        return self._s_h2X

    @property
    def max_X(self):
        """Maximal depth of target."""
        if not hasattr(self, '_max_X'):
            self._integrate()
        return self._max_X

    def get_density_X(self, X):
        """Returns the density in g/cm**3 as a function of depth X.

        Args:
          X (float): depth in g/cm**2

        Returns:
          float: density in g/cm**3

        Raises:
          Exception: If requested depth exceeds target.
        """
        X = np.atleast_1d(X)
        # allow for some small constant extrapolation for odepack solvers
        if X[-1] > self.max_X and X[-1] < self.max_X * 1.003:
            X[-1] = self.max_X

        if np.min(X) < 0. or np.max(X) > self.max_X:
            # return self.get_density(self.s_X2h(self.max_X))
            info(0,
                 'Depth {0:4.3f} exceeds target dimensions {1:4.3f}'.format(
                     np.max(X), self.max_X))
            raise Exception('Invalid input')

        return self.get_density(self.s_X2h(X))

    def r_X2rho(self, X):
        """Returns the inverse density :math:`\\frac{1}{\\rho}(X)`.

        Args:
          X (float): slant depth in g/cm**2

        Returns:
          float: :math:`1/\\rho` in cm**3/g
        """
        return 1. / self.get_density_X(X)

    def get_density(self, l_cm):
        """Returns the density in g/cm**3 as a function of position l in cm.

        Args:
          l (float): position in target in cm

        Returns:
          float: density in g/cm**3

        Raises:
          Exception: If requested position exceeds target length.
        """
        l_cm = np.atleast_1d(l_cm)
        res = np.zeros_like(l_cm)
        if np.min(l_cm) < 0 or np.max(l_cm) > self.len_target:
            raise Exception("GeneralizedTarget::get_density(): " +
                            "requested position exceeds target legth.")
        for i, li in enumerate(l_cm):
            # Linear scan for the material segment that contains li.
            bi = 0
            while not (li >= self.start_bounds[bi]
                       and li <= self.end_bounds[bi]):
                bi += 1
            res[i] = self.densities[bi]
        return res

    def draw_materials(self, axes=None, logx=False):
        """Makes a plot of depth and density profile as a function
        of the target length. The list of materials is printed out, too.

        Args:
          axes (plt.axes, optional): handle for matplotlib axes
        """
        import matplotlib.pyplot as plt
        if not axes:
            plt.figure(figsize=(5, 2.5))
            axes = plt.gca()

        ymax = np.max(self.X_int) * 1.01
        for _, mat in enumerate(self.mat_list):
            xstart = mat[0]
            xend = mat[1]
            # Shade each material proportionally to its density, clamped
            # to [0, 1].
            alpha = 0.188 * mat[2] / max(self.densities) + 0.248
            if alpha > 1:
                alpha = 1.
            elif alpha < 0.:
                alpha = 0.
            axes.fill_between((xstart, xend), (ymax, ymax), (0., 0.),
                              label=mat[2],
                              facecolor='grey',
                              alpha=alpha)
            # axes.text(0.5e-2 * (xstart + xend), 0.5 * ymax, str(nm))
        axes.plot([xl for xl in self.knots], self.X_int, lw=1.7, color='r')
        if logx:
            axes.set_xscale('log', nonposx='clip')
        axes.set_ylim(0., ymax)
        axes.set_xlabel('distance in target (cm)')
        axes.set_ylabel(r'depth X (g/cm$^2)$')

        self.print_table(min_dbg_lev=2)

    def print_table(self, min_dbg_lev=0):
        """Prints table of materials to standard output.
        """
        templ = '{0:^3} | {1:15} | {2:^9.3g} | {3:^9.3g} | {4:^8.5g}'
        info(min_dbg_lev,
             '********************* List of materials ***********************',
             no_caller=True)
        head = '{0:3} | {1:15} | {2:9} | {3:9} | {4:9}'.format(
            'no', 'name', 'start [cm]', 'end [cm]', 'density [g/cm**3]')
        info(min_dbg_lev, '-' * len(head), no_caller=True)
        info(min_dbg_lev, head, no_caller=True)
        info(min_dbg_lev, '-' * len(head), no_caller=True)
        for nm, mat in enumerate(self.mat_list):
            info(min_dbg_lev,
                 templ.format(nm, mat[3], mat[0], mat[1], mat[2]),
                 no_caller=True)


if __name__ == '__main__':
    # Demo: plot density vs. depth for all bundled CORSIKA and MSIS
    # atmosphere parametrizations.
    import matplotlib.pyplot as plt

    plt.figure(figsize=(5, 4))
    plt.title('CORSIKA atmospheres')
    cka_atmospheres = [
        ("USStd", None),
        ("BK_USStd", None),
        ("Karlsruhe", None),
        ("ANTARES/KM3NeT-ORCA", 'Summer'),
        ("ANTARES/KM3NeT-ORCA", 'Winter'),
        ("KM3NeT-ARCA", 'Summer'),
        ("KM3NeT-ARCA", 'Winter'),
        ("KM3NeT", None),
        ('SouthPole', 'December'),
        ('PL_SouthPole', 'January'),
        ('PL_SouthPole', 'August'),
    ]
    cka_surf_100 = []
    for loc, season in cka_atmospheres:
        cka_obj = CorsikaAtmosphere(loc, season)
        cka_obj.set_theta(0.0)
        x_vec = np.linspace(0, cka_obj.max_X, 5000)
        plt.plot(x_vec,
                 1 / cka_obj.r_X2rho(x_vec),
                 lw=1.5,
                 label='{0}/{1}'.format(loc, season)
                 if season is not None else '{0}'.format(loc))
        cka_surf_100.append((cka_obj.max_X, 1. / cka_obj.r_X2rho(100.)))
    print(cka_surf_100)
    plt.ylabel(r'Density $\rho$ (g/cm$^3$)')
    plt.xlabel(r'Depth (g/cm$^2$)')
    plt.legend(loc='upper left')
    plt.tight_layout()

    plt.figure(figsize=(5, 4))
    plt.title('NRLMSISE-00 atmospheres')
    msis_atmospheres = [
        ('SouthPole', "January"),
        ('Karlsruhe', "January"),
        ('Geneva', "January"),
        ('Tokyo', "January"),
        ('SanGrasso', "January"),
        ('TelAviv', "January"),
        ('KSC', "January"),
        ('SoudanMine', "January"),
        ('Tsukuba', "January"),
        ('LynnLake', "January"),
        ('PeaceRiver', "January"),
        ('FtSumner', "January")
    ]
    msis_surf_100 = []
    for loc, season in msis_atmospheres:
        msis_obj = MSIS00Atmosphere(loc, season)
        msis_obj.set_theta(0.0)
        x_vec = np.linspace(0, msis_obj.max_X, 5000)
        plt.plot(x_vec,
                 1 / msis_obj.r_X2rho(x_vec),
                 lw=1.5,
                 label='{0}'.format(loc))
        msis_surf_100.append((msis_obj.max_X, 1. / msis_obj.r_X2rho(100.)))
    print(msis_surf_100)
    plt.ylabel(r'Density $\rho$ (g/cm$^3$)')
    plt.xlabel(r'Depth (g/cm$^2$)')
    plt.legend(loc='upper left')
    plt.tight_layout()

    plt.show()
import sys

import numpy as np

from PyQt4 import QtGui
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QComboBox

import Orange.data
from Orange.preprocess import impute
from Orange.base import Learner
from Orange.widgets import gui, settings
from Orange.widgets.utils import itemmodels
from Orange.widgets.utils.sql import check_sql_input
from Orange.widgets.widget import OWWidget
from Orange.classification import SimpleTreeLearner


class DisplayFormatDelegate(QtGui.QStyledItemDelegate):
    # Renders a variable row with the short description of its imputation
    # method. Methods that cannot handle the variable are shown in dark red,
    # "do not impute" in dark gray.
    def initStyleOption(self, option, index):
        super().initStyleOption(option, index)
        method = index.data(Qt.UserRole)
        var = index.model()[index.row()]
        if method:
            option.text = method.format_variable(var)

            if not method.supports_variable(var):
                option.palette.setColor(option.palette.Text, Qt.darkRed)

            # AsDefault wraps the actual method in its `method` attribute;
            # unwrap before checking for DoNotImpute.
            if isinstance(getattr(method, 'method', method),
                          impute.DoNotImpute):
                option.palette.setColor(option.palette.Text, Qt.darkGray)


class AsDefault(impute.BaseImputeMethod):
    # Sentinel imputation method meaning "use the widget-wide default".
    # All real behavior is forwarded to the currently selected default
    # method stored in `method`.
    name = "Default (above)"
    short_name = ""
    format = "{var.name}"
    columns_only = True

    method = impute.DoNotImpute()

    def __getattr__(self, item):
        # Delegate unknown attributes to the wrapped default method.
        return getattr(self.method, item)

    def supports_variable(self, variable):
        return self.method.supports_variable(variable)

    def __call__(self, *args, **kwargs):
        return self.method(*args, **kwargs)


class OWImpute(OWWidget):
    name = "Impute"
    description = "Impute missing values in the data table."
icon = "icons/Impute.svg" priority = 2130 inputs = [("Data", Orange.data.Table, "set_data"), ("Learner", Learner, "set_learner")] outputs = [("Data", Orange.data.Table)] DEFAULT_LEARNER = SimpleTreeLearner() METHODS = [AsDefault(), impute.DoNotImpute(), impute.Average(), impute.AsValue(), impute.Model(DEFAULT_LEARNER), impute.Random(), impute.DropInstances(), impute.Default()] DEFAULT, DO_NOT_IMPUTE, MODEL_BASED_IMPUTER, AS_INPUT = 0, 1, 4, 7 settingsHandler = settings.DomainContextHandler() _default_method_index = settings.Setting(DO_NOT_IMPUTE) variable_methods = settings.ContextSetting({}) autocommit = settings.Setting(False) default_value = settings.Setting(0.) want_main_area = False resizing_enabled = False def __init__(self): super().__init__() main_layout = QtGui.QVBoxLayout() main_layout.setMargin(10) self.controlArea.layout().addLayout(main_layout) box = QtGui.QGroupBox(title=self.tr("Default Method"), flat=False) box_layout = QtGui.QVBoxLayout(box) main_layout.addWidget(box) button_group = QtGui.QButtonGroup() button_group.buttonClicked[int].connect(self.set_default_method) for i, method in enumerate(self.METHODS): if not method.columns_only: button = QtGui.QRadioButton(method.name) button.setChecked(i == self.default_method_index) button_group.addButton(button, i) box_layout.addWidget(button) self.default_button_group = button_group box = QtGui.QGroupBox(title=self.tr("Individual Attribute Settings"), flat=False) main_layout.addWidget(box) horizontal_layout = QtGui.QHBoxLayout(box) main_layout.addWidget(box) self.varview = QtGui.QListView( selectionMode=QtGui.QListView.ExtendedSelection ) self.varview.setItemDelegate(DisplayFormatDelegate()) self.varmodel = itemmodels.VariableListModel() self.varview.setModel(self.varmodel) self.varview.selectionModel().selectionChanged.connect( self._on_var_selection_changed ) self.selection = self.varview.selectionModel() horizontal_layout.addWidget(self.varview) method_layout = QtGui.QVBoxLayout() 
horizontal_layout.addLayout(method_layout) button_group = QtGui.QButtonGroup() for i, method in enumerate(self.METHODS): button = QtGui.QRadioButton(text=method.name) button_group.addButton(button, i) method_layout.addWidget(button) self.value_combo = QComboBox( minimumContentsLength=8, sizeAdjustPolicy=QComboBox.AdjustToMinimumContentsLength, activated=self._on_value_selected ) self.value_combo.currentIndexChanged.connect(self._on_value_changed) self.value_double = QtGui.QDoubleSpinBox( editingFinished=self._on_value_selected, minimum=-1000., maximum=1000., singleStep=.1, decimals=3, value=self.default_value ) self.value_stack = value_stack = QtGui.QStackedLayout() value_stack.addWidget(self.value_combo) value_stack.addWidget(self.value_double) method_layout.addLayout(value_stack) button_group.buttonClicked[int].connect( self.set_method_for_current_selection ) method_layout.addStretch(2) reset_button = QtGui.QPushButton( "Restore All to Default", checked=False, checkable=False, clicked=self.reset_variable_methods, default=False, autoDefault=False) method_layout.addWidget(reset_button) self.variable_button_group = button_group box = gui.auto_commit( self.controlArea, self, "autocommit", "Apply", orientation=Qt.Horizontal, checkbox_label="Apply automatically") box.layout().insertSpacing(0, 80) box.layout().insertWidget(0, self.report_button) self.data = None self.modified = False self.default_method = self.METHODS[self.default_method_index] self.update_varview() @property def default_method_index(self): return self._default_method_index @default_method_index.setter def default_method_index(self, index): if self._default_method_index != index: self._default_method_index = index self.default_button_group.button(index).setChecked(True) self.default_method = self.METHODS[self.default_method_index] self.METHODS[self.DEFAULT].method = self.default_method # update variable view for index in map(self.varmodel.index, range(len(self.varmodel))): self.varmodel.setData(index, 
self.variable_methods.get(index.row(), self.METHODS[self.DEFAULT]), Qt.UserRole) self._invalidate() def set_default_method(self, index): """Set the current selected default imputation method. """ self.default_method_index = index @check_sql_input def set_data(self, data): self.closeContext() self.varmodel[:] = [] self.variable_methods = {} self.modified = False self.data = data if data is not None: self.varmodel[:] = data.domain.variables self.openContext(data.domain) self.update_varview() self.unconditional_commit() def set_learner(self, learner): self.learner = learner or self.DEFAULT_LEARNER imputer = self.METHODS[self.MODEL_BASED_IMPUTER] imputer.learner = self.learner button = self.default_button_group.button(self.MODEL_BASED_IMPUTER) button.setText(imputer.name) variable_button = self.variable_button_group.button(self.MODEL_BASED_IMPUTER) variable_button.setText(imputer.name) if learner is not None: self.default_method_index = self.MODEL_BASED_IMPUTER self.commit() def get_method_for_column(self, column_index): """Returns the imputation method for column by its index. 
""" if not isinstance(column_index, int): column_index = column_index.row() return self.variable_methods.get(column_index, self.METHODS[self.DEFAULT]) def _invalidate(self): self.modified = True self.commit() def commit(self): data = self.data if self.data is not None: drop_mask = np.zeros(len(self.data), bool) attributes = [] class_vars = [] self.warning(1) with self.progressBar(len(self.varmodel)) as progress: for i, var in enumerate(self.varmodel): method = self.variable_methods.get(i, self.default_method) if not method.supports_variable(var): self.warning(1, "Default method could not impute some " "of the variables.") elif isinstance(method, impute.DropInstances): drop_mask |= method(self.data, var) else: var = method(self.data, var) if isinstance(var, Orange.data.Variable): var = [var] if i < len(self.data.domain.attributes): attributes.extend(var) else: class_vars.extend(var) progress.advance() domain = Orange.data.Domain(attributes, class_vars, self.data.domain.metas) data = self.data.from_table(domain, self.data[~drop_mask]) self.send("Data", data) self.modified = False def send_report(self): specific = [] for i, var in enumerate(self.varmodel): method = self.variable_methods.get(i, None) if method is not None: specific.append("{} ({})".format(var.name, str(method))) default = self.default_method.name if specific: self.report_items(( ("Default method", default), ("Specific imputers", ", ".join(specific)) )) else: self.report_items((("Method", default),)) def _on_var_selection_changed(self): indexes = self.selection.selectedIndexes() methods = set(self.get_method_for_column(i.row()).name for i in indexes) selected_vars = [self.varmodel[index.row()] for index in indexes] has_discrete = any(var.is_discrete for var in selected_vars) if len(methods) == 1: method = methods.pop() for i, m in enumerate(self.METHODS): if method == m.name: self.variable_button_group.button(i).setChecked(True) elif self.variable_button_group.checkedButton() is not None: 
self.variable_button_group.setExclusive(False) self.variable_button_group.checkedButton().setChecked(False) self.variable_button_group.setExclusive(True) for method, button in zip(self.METHODS, self.variable_button_group.buttons()): enabled = all(method.supports_variable(var) for var in selected_vars) button.setEnabled(enabled) if not has_discrete: self.value_stack.setEnabled(True) self.value_stack.setCurrentWidget(self.value_double) self._on_value_changed() elif len(selected_vars) == 1: self.value_stack.setEnabled(True) self.value_stack.setCurrentWidget(self.value_combo) self.value_combo.clear() self.value_combo.addItems(selected_vars[0].values) self._on_value_changed() else: self.variable_button_group.button(self.AS_INPUT).setEnabled(False) self.value_stack.setEnabled(False) def set_method_for_current_selection(self, method_index): indexes = self.selection.selectedIndexes() self.set_method_for_indexes(indexes, method_index) def set_method_for_indexes(self, indexes, method_index): if method_index == self.DEFAULT: for index in indexes: self.variable_methods.pop(index, None) else: method = self.METHODS[method_index].copy() for index in indexes: self.variable_methods[index.row()] = method self.update_varview(indexes) self._invalidate() def update_varview(self, indexes=None): if indexes is None: indexes = map(self.varmodel.index, range(len(self.varmodel))) for index in indexes: self.varmodel.setData(index, self.get_method_for_column(index.row()), Qt.UserRole) def _on_value_selected(self): self.variable_button_group.button(self.AS_INPUT).setChecked(True) self._on_value_changed() def _on_value_changed(self): widget = self.value_stack.currentWidget() if widget is self.value_combo: value = self.value_combo.currentText() else: value = self.value_double.value() self.default_value = value self.METHODS[self.AS_INPUT].default = value index = self.variable_button_group.checkedId() if index == self.AS_INPUT: self.set_method_for_current_selection(index) def 
reset_variable_methods(self): indexes = map(self.varmodel.index, range(len(self.varmodel))) self.set_method_for_indexes(indexes, self.DEFAULT) self.variable_button_group.button(self.DEFAULT).setChecked(True) def main(argv=sys.argv): app = QtGui.QApplication(list(argv)) argv = app.argv() if len(argv) > 1: filename = argv[1] else: filename = "brown-selected" w = OWImpute() w.show() w.raise_() data = Orange.data.Table(filename) w.set_data(data) w.handleNewSignals() app.exec_() w.set_data(None) w.set_learner(None) w.handleNewSignals() w.onDeleteWidget() return 0 if __name__ == "__main__": sys.exit(main())
# Tests for AWS Lambda event source mappings (SQS, DynamoDB Streams, SNS)
# run against the moto mock backends. Tests marked @pytest.mark.network
# actually execute the zipped lambda code and poll CloudWatch Logs for output.
import botocore.client
import boto3
import json
import pytest
import time
import sure  # noqa # pylint: disable=unused-import
import uuid

from moto import (
    mock_dynamodb2,
    mock_lambda,
    mock_logs,
    mock_sns,
    mock_sqs,
)
from uuid import uuid4
from .utilities import (
    get_role_name,
    get_test_zip_file3,
    wait_for_log_msg,
    get_test_zip_file_error,
)

_lambda_region = "us-west-2"
boto3.setup_default_session(region_name=_lambda_region)


@mock_logs
@mock_lambda
@mock_sqs
def test_create_event_source_mapping():
    """An SQS-backed mapping is created Enabled with the queue/function ARNs."""
    # Short random name keeps parallel test runs from colliding.
    function_name = str(uuid4())[0:6]
    sqs = boto3.resource("sqs", region_name="us-east-1")
    queue = sqs.create_queue(QueueName=f"{function_name}_queue")

    conn = boto3.client("lambda", region_name="us-east-1")
    func = conn.create_function(
        FunctionName=function_name,
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )

    response = conn.create_event_source_mapping(
        EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"]
    )

    assert response["EventSourceArn"] == queue.attributes["QueueArn"]
    assert response["FunctionArn"] == func["FunctionArn"]
    assert response["State"] == "Enabled"


@pytest.mark.network
@pytest.mark.parametrize("key", ["FunctionName", "FunctionArn"])
@mock_logs
@mock_lambda
@mock_sqs
def test_invoke_function_from_sqs(key):
    """Sending an SQS message triggers the function (by name or by ARN)."""
    function_name = str(uuid4())[0:6]
    sqs = boto3.resource("sqs", region_name="us-east-1")
    queue = sqs.create_queue(QueueName=f"{function_name}_queue")

    conn = boto3.client("lambda", region_name="us-east-1")
    func = conn.create_function(
        FunctionName=function_name,
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    # Mapping creation must accept either identifier form.
    name_or_arn = func[key]
    response = conn.create_event_source_mapping(
        EventSourceArn=queue.attributes["QueueArn"], FunctionName=name_or_arn
    )

    assert response["EventSourceArn"] == queue.attributes["QueueArn"]
    assert response["State"] == "Enabled"

    sqs_client = boto3.client("sqs", region_name="us-east-1")
    sqs_client.send_message(QueueUrl=queue.url, MessageBody="test")

    # The zipped handler prints this marker on success.
    expected_msg = "get_test_zip_file3 success"
    log_group = f"/aws/lambda/{function_name}"
    msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)

    assert msg_showed_up, (
        expected_msg
        + " was not found after sending an SQS message. All logs: "
        + str(all_logs)
    )


@pytest.mark.network
@mock_logs
@mock_lambda
@mock_dynamodb2
def test_invoke_function_from_dynamodb_put():
    """A put_item on a streamed table invokes the mapped function."""
    dynamodb = boto3.client("dynamodb", region_name="us-east-1")
    table_name = str(uuid4())[0:6] + "_table"
    table = dynamodb.create_table(
        TableName=table_name,
        KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
        StreamSpecification={
            "StreamEnabled": True,
            "StreamViewType": "NEW_AND_OLD_IMAGES",
        },
        ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 5},
    )

    conn = boto3.client("lambda", region_name="us-east-1")
    function_name = str(uuid4())[0:6]
    func = conn.create_function(
        FunctionName=function_name,
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function executed after a DynamoDB table is updated",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.create_event_source_mapping(
        EventSourceArn=table["TableDescription"]["LatestStreamArn"],
        FunctionName=func["FunctionArn"],
    )

    assert response["EventSourceArn"] == table["TableDescription"]["LatestStreamArn"]
    assert response["State"] == "Enabled"

    dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}})

    expected_msg = "get_test_zip_file3 success"
    log_group = f"/aws/lambda/{function_name}"
    msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)

    assert msg_showed_up, (
        expected_msg + " was not found after a DDB insert. All logs: " + str(all_logs)
    )


@pytest.mark.network
@mock_logs
@mock_lambda
@mock_dynamodb2
def test_invoke_function_from_dynamodb_update():
    """An update_item fires a second, single-record stream invocation."""
    dynamodb = boto3.client("dynamodb", region_name="us-east-1")
    table_name = str(uuid4())[0:6] + "_table"
    table = dynamodb.create_table(
        TableName=table_name,
        KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
        StreamSpecification={
            "StreamEnabled": True,
            "StreamViewType": "NEW_AND_OLD_IMAGES",
        },
        ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 5},
    )

    conn = boto3.client("lambda", region_name="us-east-1")
    function_name = str(uuid4())[0:6]
    func = conn.create_function(
        FunctionName=function_name,
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function executed after a DynamoDB table is updated",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    conn.create_event_source_mapping(
        EventSourceArn=table["TableDescription"]["LatestStreamArn"],
        FunctionName=func["FunctionArn"],
    )

    # First invocation: the insert event.
    dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}})
    log_group = f"/aws/lambda/{function_name}"
    expected_msg = "get_test_zip_file3 success"
    # NOTE(review): msg_showed_up is discarded here — only the record count is
    # asserted for the insert; confirm that is intentional.
    msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)
    assert "Nr_of_records(1)" in all_logs, "Only one item should be inserted"

    # Second invocation: the update event must arrive as its own single record.
    dynamodb.update_item(
        TableName=table_name,
        Key={"id": {"S": "item 1"}},
        UpdateExpression="set #attr = :val",
        ExpressionAttributeNames={"#attr": "new_attr"},
        ExpressionAttributeValues={":val": {"S": "new_val"}},
    )
    msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)
    assert msg_showed_up, (
        expected_msg + " was not found after updating DDB. All logs: " + str(all_logs)
    )
    assert "Nr_of_records(1)" in all_logs, "Only one item should be updated"
    assert (
        "Nr_of_records(2)" not in all_logs
    ), "The inserted item should not show up again"


@pytest.mark.network
@mock_logs
@mock_lambda
@mock_sqs
def test_invoke_function_from_sqs_exception():
    """When the function raises, its SQS messages stay visible on the queue."""
    function_name = str(uuid4())[0:6]
    logs_conn = boto3.client("logs", region_name="us-east-1")
    sqs = boto3.resource("sqs", region_name="us-east-1")
    queue = sqs.create_queue(QueueName=f"{function_name}_queue")

    conn = boto3.client("lambda", region_name="us-east-1")
    func = conn.create_function(
        FunctionName=function_name,
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        # This zip deliberately raises, logging "I failed!".
        Code={"ZipFile": get_test_zip_file_error()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.create_event_source_mapping(
        EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"]
    )

    assert response["EventSourceArn"] == queue.attributes["QueueArn"]
    assert response["State"] == "Enabled"

    entries = []
    for i in range(3):
        body = {"uuid": str(uuid.uuid4()), "test": "test_{}".format(i)}
        entry = {"Id": str(i), "MessageBody": json.dumps(body)}
        entries.append(entry)

    queue.send_messages(Entries=entries)

    # Poll the log group (up to 30s) for the failure marker.
    start = time.time()
    while (time.time() - start) < 30:
        result = logs_conn.describe_log_streams(
            logGroupName=f"/aws/lambda/{function_name}"
        )
        log_streams = result.get("logStreams")
        if not log_streams:
            time.sleep(1)
            continue
        assert len(log_streams) >= 1

        result = logs_conn.get_log_events(
            logGroupName=f"/aws/lambda/{function_name}",
            logStreamName=log_streams[0]["logStreamName"],
        )
        for event in result.get("events"):
            if "I failed!" in event["message"]:
                messages = queue.receive_messages(MaxNumberOfMessages=10)
                # Verify messages are still visible and unprocessed
                assert len(messages) == 3
                return

        time.sleep(1)

    assert False, "Test Failed"


@pytest.mark.network
@mock_logs
@mock_sns
@mock_lambda
def test_invoke_function_from_sns():
    """Publishing to a subscribed SNS topic invokes the function."""
    logs_conn = boto3.client("logs", region_name=_lambda_region)
    sns_conn = boto3.client("sns", region_name=_lambda_region)
    sns_conn.create_topic(Name="some-topic")
    topics_json = sns_conn.list_topics()
    topics = topics_json["Topics"]
    topic_arn = topics[0]["TopicArn"]

    conn = boto3.client("lambda", _lambda_region)
    function_name = str(uuid4())[0:6]
    result = conn.create_function(
        FunctionName=function_name,
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )

    sns_conn.subscribe(
        TopicArn=topic_arn, Protocol="lambda", Endpoint=result["FunctionArn"]
    )

    result = sns_conn.publish(TopicArn=topic_arn, Message=json.dumps({}))

    # Poll the log group (up to 10s) for the success marker.
    start = time.time()
    events = []
    while (time.time() - start) < 10:
        result = logs_conn.describe_log_streams(
            logGroupName=f"/aws/lambda/{function_name}"
        )
        log_streams = result.get("logStreams")
        if not log_streams:
            time.sleep(1)
            continue
        assert len(log_streams) == 1

        result = logs_conn.get_log_events(
            logGroupName=f"/aws/lambda/{function_name}",
            logStreamName=log_streams[0]["logStreamName"],
        )
        events = result.get("events")
        for event in events:
            if event["message"] == "get_test_zip_file3 success":
                return

        time.sleep(1)

    assert False, "Expected message not found in logs:" + str(events)


@mock_logs
@mock_lambda
@mock_sqs
def test_list_event_source_mappings():
    """Listing by source ARN returns the created mapping (and nothing for an unknown ARN)."""
    function_name = str(uuid4())[0:6]
    sqs = boto3.resource("sqs", region_name="us-east-1")
    queue = sqs.create_queue(QueueName=f"{function_name}_queue")

    conn = boto3.client("lambda", region_name="us-east-1")
    func = conn.create_function(
        FunctionName=function_name,
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.create_event_source_mapping(
        EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"]
    )
    mappings = conn.list_event_source_mappings(EventSourceArn="123")
    mappings["EventSourceMappings"].should.have.length_of(0)

    mappings = conn.list_event_source_mappings(
        EventSourceArn=queue.attributes["QueueArn"]
    )
    assert len(mappings["EventSourceMappings"]) >= 1
    assert mappings["EventSourceMappings"][0]["UUID"] == response["UUID"]
    assert mappings["EventSourceMappings"][0]["FunctionArn"] == func["FunctionArn"]


@mock_lambda
@mock_sqs
def test_get_event_source_mapping():
    """get_event_source_mapping returns the mapping; unknown UUID raises ClientError."""
    function_name = str(uuid4())[0:6]
    sqs = boto3.resource("sqs", region_name="us-east-1")
    queue = sqs.create_queue(QueueName=f"{function_name}_queue")

    conn = boto3.client("lambda", region_name="us-east-1")
    func = conn.create_function(
        FunctionName=function_name,
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.create_event_source_mapping(
        EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"]
    )
    mapping = conn.get_event_source_mapping(UUID=response["UUID"])
    assert mapping["UUID"] == response["UUID"]
    assert mapping["FunctionArn"] == func["FunctionArn"]

    conn.get_event_source_mapping.when.called_with(UUID="1").should.throw(
        botocore.client.ClientError
    )


@mock_lambda
@mock_sqs
def test_update_event_source_mapping():
    """update_event_source_mapping can retarget, disable, and re-batch a mapping."""
    function_name = str(uuid4())[0:6]
    sqs = boto3.resource("sqs", region_name="us-east-1")
    queue = sqs.create_queue(QueueName=f"{function_name}_queue")

    conn = boto3.client("lambda", region_name="us-east-1")
    func1 = conn.create_function(
        FunctionName=function_name,
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    func2 = conn.create_function(
        FunctionName="testFunction2",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.create_event_source_mapping(
        EventSourceArn=queue.attributes["QueueArn"], FunctionName=func1["FunctionArn"]
    )
    assert response["FunctionArn"] == func1["FunctionArn"]
    # 10 is the documented default BatchSize for SQS mappings.
    assert response["BatchSize"] == 10
    assert response["State"] == "Enabled"

    mapping = conn.update_event_source_mapping(
        UUID=response["UUID"], Enabled=False, BatchSize=2, FunctionName="testFunction2"
    )
    assert mapping["UUID"] == response["UUID"]
    assert mapping["FunctionArn"] == func2["FunctionArn"]
    assert mapping["State"] == "Disabled"
    assert mapping["BatchSize"] == 2


@mock_lambda
@mock_sqs
def test_delete_event_source_mapping():
    """Deleting a mapping transitions it to Deleting and then 404s on get."""
    function_name = str(uuid4())[0:6]
    sqs = boto3.resource("sqs", region_name="us-east-1")
    queue = sqs.create_queue(QueueName=f"{function_name}_queue")

    conn = boto3.client("lambda", region_name="us-east-1")
    func1 = conn.create_function(
        FunctionName=function_name,
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": get_test_zip_file3()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )
    response = conn.create_event_source_mapping(
        EventSourceArn=queue.attributes["QueueArn"], FunctionName=func1["FunctionArn"]
    )
    assert response["FunctionArn"] == func1["FunctionArn"]
    assert response["BatchSize"] == 10
    assert response["State"] == "Enabled"

    response = conn.delete_event_source_mapping(UUID=response["UUID"])

    assert response["State"] == "Deleting"
    conn.get_event_source_mapping.when.called_with(UUID=response["UUID"]).should.throw(
        botocore.client.ClientError
    )
# Parser for Windows .pfm (printer font metrics) files describing Type-1 fonts.
# NOTE(review): this is Python 2 code (print statement, `raise E, msg`,
# `apply`, types.StringType) — it will not run under Python 3 as-is.
import struct
from types import StringType


class _BUILDER:
    '''Virtual base helper class for structured file scanning'''

    def _get_struct_fmt(self,info):
        # Build a little-endian struct format from the first element of
        # each (format, name, description) triple.
        fmt = '<'
        for f, _, _ in info:
            fmt += f
        return fmt

    def _scan_from_file(self,f,info):
        # Unpack one record at the current file position and set each field
        # as an attribute named by the triple's second element.
        fmt = self._get_struct_fmt(info)
        size = struct.calcsize(fmt)
        T = struct.unpack(fmt,f.read(size))
        i = 0
        for _, n, _ in info:
            setattr(self,n,T[i])
            i = i + 1

    def _dump(self,A):
        # Print "name value" for every attribute name in A (debug aid).
        for a in A:
            print a, getattr(self,a)

    def _attr_names(self,*I):
        # Flatten a mix of plain attribute-name strings and struct-info
        # tuples into a single list of attribute names.
        A = []
        for i in I:
            if type(i) is StringType:
                A.append(i)
            else:
                A.extend(map(lambda x: x[1],i))
        return A

    def _scanZTStr(self,f,loc):
        '''scan a zero terminated string from the file'''
        f.seek(loc)
        s = ''
        while 1:
            c = f.read(1)
            # NOTE(review): EOF (empty read) is not handled — a missing
            # terminator would loop appending '' forever; confirm inputs.
            if c=='\000': break
            s = s+c
        return s

    def _scanN(self,N,fmt,f,loc):
        # Read N values of format `fmt` at offset `loc`; returns None when
        # the offset is zero (field absent in the file).
        if not loc: return None
        fmt = len(fmt)==1 and ("<%d%c" % (N,fmt)) or ("<"+N*fmt)
        f.seek(loc)
        size = struct.calcsize(fmt)
        return struct.unpack(fmt,f.read(size))

    def _scanNT(self,T,N,fmt,f,loc):
        # Read N records of format `fmt` at offset `loc` and construct an
        # instance of class T from each record's fields.
        if not loc: return None
        n = len(fmt)
        X = []
        i = 0
        S = []
        for x in self._scanN(N,fmt,f,loc):
            S.append(x)
            i = i + 1
            if i==n:
                X.append(S)
                i = 0
                S = []
        return map(lambda x,T=T: apply(T,x),X)


class KernPair:
    '''hold info about a possible kerning pair'''
    def __init__(self,first,second,amount):
        self.first = first
        # NOTE(review): 'scond' looks like a typo for 'second' — any caller
        # reading .second will get AttributeError; confirm and fix.
        self.scond = second
        self.amount = amount


class KernTrack:
    def __init__(self,degree,minSize,minAmount,maxSize,maxAmount):
        '''
        degree      amount to change the character spacing. Negative values
                    mean closer together, positive values mean farther apart.
        minSize     minimum font height (in device units) for which to use
                    linear track kerning.
        minAmount   track kerning amount to use for font heights less or
                    equal ktMinSize.
        maxSize     maximum font height (in device units) for which to use
                    linear track kerning. For font heights between ktMinSize
                    and ktMaxSize the track kerning amount has to increase
                    linearily from ktMinAmount to ktMaxAmount.
        maxAmount   track kerning amount to use for font heights greater or
                    equal ktMaxSize.
        '''
        self.degree = degree
        self.minSize = minSize
        self.minAmount = minAmount
        self.maxSize = maxSize
        self.maxAmount = maxAmount


class PFM(_BUILDER):
    '''Class to hold information scanned from a type-1 .pfm file'''
    # (docstring relocated here from after __init__, where it was a no-op
    # string statement)
    def __init__(self,fn=None):
        # Accept either a filename or an already-open binary file object;
        # only close the file if we opened it ourselves.
        if fn:
            if type(fn) is StringType:
                f = open(fn,'rb')
            else:
                f = fn
            self.scan_from_file(f)
            if f is not fn: f.close()

    def scan_from_file(self,f):
        # Parse header, extension, extended text metrics, and the optional
        # string/table sections addressed by the header offsets.
        self._scan_from_file(f,self._header_struct_info)
        # 0x81 = PF_VECTOR_TYPE | PF_DEVICE_REALIZED: Type-1 marker.
        if self.dfType!=0x81:
            raise ValueError, "Not a Type-1 Font description"
        else:
            # Type-1 PFMs never carry a raster WidthTable.
            self.WidthTable = None
        self._scan_from_file(f,self._extension_struct_info)
        if not self.dfExtentTable:
            raise ValueError, 'dfExtentTable is zero'
        if not self.dfExtMetricsOffset:
            raise ValueError, 'dfExtMetricsOffset is zero'
        if self.dfDevice:
            self.DeviceName = self._scanZTStr(f,self.dfDevice)
        else:
            self.DeviceName = None
        if self.dfFace:
            self.FaceName = self._scanZTStr(f,self.dfFace)
        else:
            self.FaceName = None
        f.seek(self.dfExtMetricsOffset)
        self._scan_from_file(f, self._extTextMetrics_struct_info)
        N = self.dfLastChar - self.dfFirstChar + 1
        self.ExtentTable = self._scanN(N,'H',f,self.dfExtentTable)
        if self.dfDriverInfo:
            self.DriverInfo = self._scanZTStr(f,self.dfDriverInfo)
        else:
            self.DriverInfo = None
        # NOTE(review): _scanNT's signature is (T, N, fmt, f, loc) — here the
        # count (etmKernPairs/etmKernTracks) is passed as `loc` and the file
        # offset (dfPairKernTable/dfTrackKernTable) as `N`, i.e. the two
        # arguments appear swapped in both calls below; confirm against the
        # PFM spec and working inputs before changing.
        if self.dfPairKernTable:
            self.KerningPairs = self._scanNT(KernPair,self.dfPairKernTable,'BBh',f,self.etmKernPairs)
        else:
            self.KerningPairs = []
        if self.dfTrackKernTable:
            self.KerningTracks = self._scanNT(KernTrack,self.dfTrackKernTable,'hhhhh',f,self.etmKernTracks)
        else:
            self.KerningTracks = []

    def dump(self):
        # Debug: print every scanned attribute from all three sections.
        self._dump(
            self._attr_names(
                self._header_struct_info,'WidthTable',
                self._extension_struct_info,
                'DeviceName',
                'FaceName',
                self._extTextMetrics_struct_info,
                'DriverInfo',
                ))

    # (format, attribute name, description) triples for the PFMHEADER record.
    # The descriptions are data strings, not docstrings.
    _header_struct_info = (('H','dfVersion',
'''This field contains the version of the PFM file.
For PFM files that conform to this description (namely PFM files for
Type-1 fonts) the value of this field is always 0x0100.'''),
        ('i','dfSize',
'''This field contains the total size of the PFM file in bytes. Some
drivers check this field and compare its value with the size of the PFM
file, and if these two values don't match the font is ignored (I know
this happens e.g. with Adobe PostScript printer drivers).
'''),
        ('60s','dfCopyright',
'''This field contains a null-terminated copyright string, often from
the application that created the PFM file (this normally isn't the
copyright string for the font file itself). The unused bytes in this
field should be set to zero.
'''),
        ('H','dfType',
'''This field contains the font type. The low-order byte is a
combination of the following values (only the values being of interest
in PFM files are given):
0x00 (PF_RASTER_TYPE): font is a raster font
0x01 (PF_VECTOR_TYPE): font is a vector font
0x80 (PF_DEVICE_REALIZED): font realized by the device driver
The high-order byte is never used in PFM files, it is always zero.
In PFM files for Type-1 fonts the value in this field is always 0x0081.
'''),
        ('H','dfPoints',
'''This field contains the point size at which this font looks best.
Since this is not relevant for scalable fonts the field is ignored.
The value of this field should be set to 0x000a (10 pt).
'''),
        ('H','dfVertRes',
'''This field contains the vertical resolution at which the font was
digitized (the value is in dots per inch). The value of this field
should be set to 0x012C (300 dpi).
'''),
        ('H','dfHorizRes',
'''This field contains the horizontal resolution at which the font was
digitized (the value is in dots per inch). The value of this field
should be set to 0x012C (300 dpi).
'''),
        ('H','dfAscent',
'''This field contains the distance from the top of a character
definition cell to the baseline of the typographical font. It is
useful for aligning the baseline of fonts of different heights.
'''),
        ('H','dfInternalLeading',
'''This field contains the amount of leading inside the bounds set by
the dfPixHeight field in the PFMHEADER structure. Accent marks may
occur in this area.
'''),
        ('H','dfExternalLeading',
'''This field contains the amount of extra leading that the designer
requests the application to add between rows. Since this area is
outside the character definition cells, it contains no marks and will
not be altered by text outputs.
'''),
        ('B','dfItalic',
'''This field specifies whether this font is an italic (or oblique)
font. The low-order bit is 1 if the flag is set, all other bits are
zero.
'''),
        ('B','dfUnderline',
'''This field specifies whether this font is an underlined font. The
low-order bit is 1 if the flag is set, all other bits are zero.
'''),
        ('B','dfStrikeOut',
'''This field specifies whether this font is a striked-out font. The
low-order bit is 1 if the flag is set, all other bits are zero.
'''),
        ('H','dfWeight',
'''This field contains the weight of the characters in this font. The
value is on a scale from 0 through 1000, increments are in steps of
100 each. The values roughly give the number of black pixel from every
1000 pixels. Typical values are:
0 (FW_DONTCARE): unknown or no information
300 (FW_LIGHT): light font
400 (FW_NORMAL): normal font
700 (FW_BOLD): bold font
'''),
        ('B','dfCharSet',
'''This field specifies the character set used in this font. It can be
one of the following values (probably other values may be used here as
well):
0x00 (ANSI_CHARSET): the font uses the ANSI character set; this means
that the font implements all characters needed for the current Windows
code page (e.g. 1252). In case of a Type-1 font this font has been
created with the encoding StandardEncoding
Note that the code page number itself is not stored in the PFM file.
0x02 (SYMBOL_CHARSET): the font uses a font-specific encoding which
will be used unchanged in displaying an printing text using this font.
In case of a Type-1 font this font has been created with a
font-specific encoding vector. Typical examples are the Symbol and the
ZapfDingbats fonts.
0xFF (OEM_CHARSET): the font uses the OEM character set; this means
that the font implements all characters needed for the code page 437
used in e.g. MS-DOS command line mode (at least in some versions of
Windows, others might use code page 850 instead). In case of a Type-1
font this font has been created with a font-specific encoding vector.
'''),
        ('H','dfPixWidth',
'''This field contains the width of all characters in the font. For
raster fonts this field contains the width in pixels of every
character bitmap if the font is fixed-pitch, otherwise this field is
zero and the character's widths are specified in the WidthTable table.
For vector fonts this field contains the width of the grid on which
the font was digitized. The value is ignored by PostScript printer
drivers.
'''),
        ('H','dfPixHeight',
'''This field contains the height of all characters in the font. For
raster fonts this field contains the height in scan lines of every
character bitmap. For vector fonts this field contains the height of
the grid on which the font was digitized. The value is ignored by
PostScript printer drivers.
'''),
        ('B','dfPitchAndFamily',
'''This field specifies the font pitch and the font family. The font
pitch specifies whether all characters in the font have the same pitch
(this is called fixed pitch too) or variable pitch. The font family
indicates, in a rather general way, the look of a font.
The least significant bit in this field contains the pitch flag. If
the bit is set the font is variable pitch, otherwise it's fixed pitch.
For Type-1 fonts this flag is set always, even if the Type-1 font is
fixed pitch.
The most significant bits of this field specify the font family. These
bits may have one of the following values:
0x00 (FF_DONTCARE): no information
0x10 (FF_ROMAN): serif font, variable pitch
0x20 (FF_SWISS): sans serif font, variable pitch
0x30 (FF_MODERN): fixed pitch, serif or sans serif font
0x40 (FF_SCRIPT): cursive or handwriting font
0x50 (FF_DECORATIVE): novelty fonts
'''),
        ('H','dfAvgWidth',
'''This field contains the average width of the characters in the
font. For a fixed pitch font this is the same as dfPixWidth in the
PFMHEADER structure. For a variable pitch font this is the width of
the character 'X'.
'''),
        ('H','dfMaxWidth',
'''This field contains the maximum width of the characters in the
font. For a fixed pitch font this value is identical to dfAvgWidth in
the PFMHEADER structure.
'''),
        ('B','dfFirstChar',
'''This field specifies the first character code defined by this font.
Width definitions are stored only for the characters actually present
in a font, so this field must be used when calculating indexes into
the WidthTable or the ExtentTable tables.
For text fonts this field is normally set to 0x20 (character space).
'''),
        ('B','dfLastChar',
'''This field specifies the last character code defined by this font.
Together with the dfFirstChar field in the PFMHEADER structure this
field specifies the valid character range for this font. There must be
an entry in the WidthTable or the ExtentTable tables for every
character between these two values (including these values
themselves).
For text fonts this field is normally set to 0xFF (maximum possible
value).
'''),
        ('B','dfDefaultChar',
'''This field specifies the default character to be used whenever a
character is used that is outside the range of the dfFirstChar through
dfLastChar fields in the PFMHEADER structure. The character is given
relative to dfFirstChar so that the actual value of the default
character is the sum of dfFirstChar and dfDefaultChar. Ideally, the
default character should be a visible character in the current font,
e.g. a period ('.').
For text fonts this field is normally set to either 0x00 (character
space) or 0x75 (bullet).
'''),
        ('B','dfBreakChar',
'''This field specifies the word-break character. Applications use
this character to separate words when wrapping or justifying lines of
text. The character is given relative to dfFirstChar in the PFMHEADER
structure so that the actual value of the word-break character is the
sum of dfFirstChar and dfBreakChar.
For text fonts this field is normally set to 0x00 (character space).
'''),
        ('H','dfWidthBytes',
'''This field contains the number of bytes in every row of the font
bitmap. The value is always an even quantity so that rows of the
bitmap start on 16 bit boundaries. This field is not used for vector
fonts, it is therefore zero in e.g. PFM files for Type-1 fonts.
'''),
        ('i','dfDevice',
'''This field contains the offset from the beginning of the PFM file
to the DeviceName character buffer. The DeviceName is always present
in PFM files for Type-1 fonts, this field is therefore never zero.'''),
        ('i','dfFace',
'''This field contains the offset from the beginning of the PFM file
to the FaceName character buffer. The FaceName is always present in
PFM files for Type-1 fonts, this field is therefore never zero.
'''),
        ('i','dfBitsPointer',
'''This field is not used in PFM files, it must be set to zero.
'''),
        ('i','dfBitsOffset',
'''This field is not used in PFM files, it must be set to zero.
'''),
        )

    # --- WidthTable (absent for Type-1 fonts) ------------------------------
    #'H','WidthTable[]'
    #This section is present in a PFM file only when this PFM file describes a
    #variable pitch raster font. Since Type-1 fonts aren't raster fonts this
    #section never exists in PFM files for Type-1 fonts.'''
    #The WidthTable table consists of (dfLastChar - dfFirstChar + 2) entries
    #of type WORD (dfFirstChar and dfLastChar can be found in the PFMHEADER
    #structure). Every entry contains the width of the corresponding
    #character, the last entry in this table is extra, it is set to zero.

    # (format, attribute name, description) triples for the PFMEXTENSION record.
    _extension_struct_info=(
        ('H','dfSizeFields',
'''This field contains the size (in bytes) of the PFMEXTENSION
structure. The value is always 0x001e.
'''),
        ('I','dfExtMetricsOffset',
'''This field contains the offset from the beginning of the PFM file
to the ExtTextMetrics section. The ExtTextMetrics section is always
present in PFM files for Type-1 fonts, this field is therefore never
zero.
'''),
        ('I','dfExtentTable',
'''This field contains the offset from the beginning of the PFM file
to the ExtentTable table. This table is always present in PFM files
for Type-1 fonts, this field is therefore never zero.
'''),
        ('I','dfOriginTable',
'''This field contains the offset from the beginning of the PFM file
to a table containing origin coordinates for screen fonts. This table
is not present in PFM files for Type-1 fonts, the field must therefore
be set to zero.
'''),
        ('I','dfPairKernTable',
'''This field contains the offset from the beginning of the PFM file
to the KerningPairs table. The value must be zero if the PFM file
doesn't contain a KerningPairs table.
'''),
        ('I','dfTrackKernTable',
'''This field contains the offset from the beginning of the PFM file
to the KerningTracks table. The value must be zero if the PFM file
doesn't contain a kerningTracks table.
'''),
        ('I','dfDriverInfo',
'''This field contains the offset from the beginning of the PFM file
to the DriverInfo section. This section is always present in PFM files
for Type-1 fonts, this field is therefore never zero.
'''),
        ('I','dfReserved',
'''This field must be set to zero.
'''),
        )

    # --- DeviceName / FaceName string buffers ------------------------------
    #char DeviceName[]
    #The DeviceName character buffer is a null-terminated string
    #containing the name of the printer driver family. PFM files
    #for Type-1 fonts have the string 'PostScript', PFM files for
    #PCL fonts have the string 'PCL/HP LaserJet'.
    #char FaceName[]
    #The FaceName character buffer is a null-terminated string
    #containing the name of the font face. In PFM files for Type-1
    #fonts this is normally
    #the PostScript name of the font without suffixes like
    #'-Bold', '-Italic' etc.

    # (format, attribute name, description) triples for EXTTEXTMETRIC.
    _extTextMetrics_struct_info = (
        ('h','etmSize',
'''This field contains the size (in bytes) of the EXTTEXTMETRIC
structure. The value is always 0x0034.
'''),
        ('h','etmPointSize',
'''This field contains the nominal point size of the font in twips
(this is a twentieth of a point or 1/1440 inch). This is the intended
graphics art size of the font, the actual size may differ slightly
depending on the resolution of the output device. In PFM files for
Type-1 fonts this value should be set to 0x00f0 (240 twips or 12 pt).
'''),
        ('h','etmOrientation',
'''This field contains the orientation of the font. This value refers
to the ability of the font to be imaged on a page of a given
orientation. It can be one of the following values:
0x0000: any orientation
0x0001: portrait (page width is smaller that its height)
0x0002: landscape (page width is greater than its height)
In PFM files for Type-1 fonts this field is always 0x0000 since a
Type-1 font can be arbitrarily rotated.
'''),
        ('h','etmMasterHeight',
'''This field contains the font size in device units for which the
values in the ExtentTable table are exact. Since Type-1 fonts are by
convention defined in a box of 1000 x 1000 units, PFM files for Type-1
fonts have the value 0x03E8 (1000, the number of units per em) in this
field.
'''),
        ('h','etmMinScale',
'''This field contains the minimum valid size for the font in device
units. The minimum valid point size can then be calculated as follows:
(etmMinScale * points-per-inch) / dfVertRes
The value for 'points-per-inch' is normally 72, the dfVertRes field
can be found in the PFMHEADER structure, it contains the vertical
resolution at which the font was digitized (this value is in dots per
inch). In PFM files for Type-1 fonts the value should be set to
0x0003.
'''),
        ('h','etmMaxScale',
'''This field contains the maximum valid size for the font in device
units. The maximum valid point size can then be calculated as follows:
(etmMaxScale * points-per-inch) / dfVertRes
(see also above etmMinScale). In PFM files for Type-1 fonts the value
should be set to 0x03E8 (1000).
'''),
        ('h','etmMasterUnits',
'''This field contains the integer number of units per em where an em
equals etmMasterHeight in the EXTTEXTMETRIC structure. In other words,
the etmMasterHeight value is expressed in font units rather than
device units. In PFM files for Type-1 fonts the value should be set to
0x03E8 (1000).
'''),
        ('h','etmCapHeight',
'''This field contains the height for uppercase characters in the font
(the value is in font units). Typically, the character 'H' is used for
measurement purposes. For Type-1 fonts you may find this value in the
AFM file.
'''),
        ('h','etmXHeight',
'''This field contains the height for lowercase characters in the font
(the value is in font units). Typically, the character 'x' is used for
measurement purposes. For Type-1 fonts you may find this value in the
AFM file.
'''),
        ('h','etmLowerCaseAscent',
'''This field contains the distance (in font units) that the ascender
of lowercase letters extends above the baseline. This distance is
typically specified for a lowercase character 'd'. For Type-1 fonts
you may find this value in the AFM file.
'''),
        ('h','etmLowerCaseDescent',
'''This field contains the distance (in font units) that the descender
of lowercase letters extends below the baseline. This distance is
typically specified for a lowercase character 'p'. For Type-1 fonts
you may find this value in the AFM file.
'''),
        ('h','etmSlant',
'''This field contains the angle in tenth of degrees clockwise from
the upright version of the font. The value is typically not zero only
for an italic or oblique font. For Type-1 fonts you may find this
value in the AFM file (search for the entry 'ItalicAngle' and multiply
it by 10).
'''),
        ('h','etmSuperScript',
'''This field contains the recommended amount (in font units) to
offset superscript characters from the baseline. This amount is
typically specified by a negative offset.
'''),
        ('h','etmSubScript',
'''This field contains the recommended amount (in font units) to
offset subscript characters from the baseline. This amount is
typically specified by a positive offset.
'''),
        ('h','etmSuperScriptSize',
'''This field contains the recommended size (in font units) for
superscript characters in the font.
'''),
        ('h','etmSubScriptSize',
'''This field contains the recommended size (in font units) for
subscript characters in the font.
'''),
        ('h','etmUnderlineOffset',
'''This field contains the offset (in font units) downward from the
baseline where the top of a single underline bar should appear. For
Type-1 fonts you may find this value in the AFM file.
'''),
        ('h','etmUnderlineWidth',
'''This field contains the thickness (in font units) of the underline
bar. For Type-1 fonts you may find this value in the AFM file.
'''),
        ('h','etmDoubleUpperUnderlineOffset',
'''This field contains the offset (in font units) downward from the
baseline where the top of the upper, double underline bar should
appear.
'''),
        ('h','etmDoubleLowerUnderlineOffset',
'''This field contains the offset (in font units) downward from the
baseline where the top of the lower, double underline bar should
appear.
'''),
        ('h','etmDoubleUpperUnderlineWidth',
'''This field contains the thickness (in font units) of the upper,
double underline bar.
'''),
        ('h','etmDoubleLowerUnderlineWidth',
'''This field contains the thickness (in font units) of the lower,
double underline bar.
'''),
        ('h','etmStrikeOutOffset',
'''This field contains the offset (in font units) upward from the
baseline where the top of a strikeout bar should appear.
'''),
        ('h','etmStrikeOutWidth',
'''This field contains the thickness (in font units) of the strikeout
bar.
'''),
        ('H','etmKernPairs',
'''This field contains the number of kerning pairs defined in the
KerningPairs table in this PFM file. The number (and therefore the
table) may not be greater than 512. If the PFM file doesn't contain a
KerningPairs table the value is zero.
'''),
        ('H','etmKernTracks',
'''This field contains the number of kerning tracks defined in the
KerningTracks table in this PFM file. The number (and therefore the
table) may not be greater than 16. If the PFM file doesn't contain a
KerningTracks table the value is zero.
'''),
        )

    # --- remaining sections (reference notes) ------------------------------
    #'H','ExtentTable[]'
    #The ExtentTable table must be present in a PFM file for a Type-1 font,
    #it contains the unscaled widths (in 1/1000's of an em) of the characters
    #in the font. The table consists of (dfLastChar - dfFirstChar + 1) entries
    #of type WORD (dfFirstChar and dfLastChar can be found in the PFMHEADER
    #structure). For Type-1 fonts these widths can be found in the AFM file.
    #DRIVERINFO DriverInfo
    #The DriverInfo section must be present in a PFM file for a Type-1 font,
    #in this case it consists of a null-terminated string containing the
    #PostScript name of the font.
    #PAIRKERN KerningPairs[]
    #The KerningPairs table need not be present in a PFM file for a Type-1
    #font, if it exists it contains etmKernPairs (from the EXTTEXTMETRIC
    #structure) entries. Each of these entries looks as follows:
    #B kpFirst This field contains the first (left) character of the kerning pair.
    #B kpSecond This field contains the second (right) character of the kerning pair.
    #h kpKernAmount This field contains the kerning amount in font units, the value
    # is mostly negative.
    #KERNTRACK KerningTracks[]
    #The KerningTracks table need not be present in a PFM file for a Type-1
    #font, if it exists it contains etmKernTracks (from the EXTTEXTMETRIC
    #structure) entries. Each of these entries looks as follows:
    #h ktDegree This field contains the amount to change the character spacing. Negative values mean closer together, positive values mean farther apart.
#h ktMinSize    This field contains the minimum font height (in device units)
#               for which to use linear track kerning.
#h ktMinAmount  This field contains the track kerning amount to use for font
#               heights less or equal ktMinSize.
#h ktMaxSize    This field contains the maximum font height (in device units)
#               for which to use linear track kerning.  For font heights
#               between ktMinSize and ktMaxSize the track kerning amount has
#               to increase linearly from ktMinAmount to ktMaxAmount.
#h ktMaxAmount  This field contains the track kerning amount to use for font
#               heights greater or equal ktMaxSize.

if __name__ == '__main__':
    # Smoke test: parse and dump every PFM file found under the given glob
    # pattern.  The default path is the Windows Acrobat 4.0 install; pass a
    # different pattern as the first command-line argument to scan elsewhere.
    import sys
    from glob import glob

    pattern = sys.argv[1] if len(sys.argv) > 1 else \
        '/Program Files/Adobe/Acrobat 4.0/resource/font/pfm/*.pfm'
    for f in glob(pattern):
        print(f)  # was a Python 2 print statement; print(f) works on 2 and 3
        p = PFM(f)
        p.dump()
""" Database module used to fetch and insert data into the database. Constructs queries from arguments with most of them being optional. Prevent some SQL injection attacks (most of them are secured via psycopg2) """ import psycopg2 import random from pyMetricServer import DEBUG from pyMetricServer.config import * import time if not DEBUG: database = psycopg2.connect(host=DATABASE_HOST, port=DATABASE_PORT, user=DATABASE_USER, password=DATABASE_PASS, database=DATABASE_NAME) cursor = database.cursor(); cursor.execute( "CREATE TABLE IF NOT EXISTS log_messages (Id BIGSERIAL, Time INTEGER, Origin TEXT, Message TEXT, Type INTEGER);") cursor.execute( "CREATE TABLE IF NOT EXISTS log_metrics (Id BIGSERIAL, Time INTEGER, Origin TEXT, Key TEXT, Value DOUBLE PRECISION);") database.commit() cursor.close() else: database = None def getMetric(timefrom=None, timeto=None, origin=None, key=None, count=None, order=None): if DEBUG: return getMetricDebug(timefrom, timeto, origin, key, count, order) results = [] cursor = database.cursor() params = [] query = "SELECT Id, Time, Origin, Key, Value FROM log_metrics " if (timefrom != None or timeto != None or origin != None or key != None): query += "WHERE " if timefrom != None: query += "Time >= %s AND " params.append(timefrom) if timeto != None: query += "Time <= %s AND " params.append(timeto) if origin != None: query += "Origin = %s AND " params.append(origin) if key != None: query += "Key = %s AND " params.append(key) query = query.strip("AND ") query += " " if order != None and order[0] != None: if order[1]: desc = "DESC " else: desc = "ASC " if order[0] == "time": query += "ORDER BY Time " + desc elif order[0] == "value": query += "ORDER BY Value " + desc elif order[0] == "key": query += "ORDER BY Key " + desc elif order[0] == "origin": query += "ORDER BY Origin " + desc elif order[0] == "id": query += "ORDER BY Id" + desc if count != None: query += "LIMIT %s " params.append(count) cursor.execute(query, tuple(params)) for row in cursor: 
results.append({ "Id": str(row[0]), "Time": str(row[1]), "Origin": str(row[2]), "Key": str(row[3]), "Value": str(row[4]) }) cursor.close() return results def getMetricDebug(timefrom=None, timeto=None, origin=None, key=None, count=None, order=None): results = [] for x in range((int(timeto)-int(timefrom))/20): results.append({ "Id": str(random.randint(1, 2*1024*1024)), "Time": str(time.time()-x*20), "Origin": str(origin), "Key": str(key), "Value": str(random.randint(0, 100)) }) return results def getMessage(timefrom=None, timeto=None, origin=None, typ=None, count=None, order=None): results = [] cursor = database.cursor() params = [] query = "SELECT Id, Time, Origin, Message, Type FROM log_messages " if (timefrom != None or timeto != None or origin != None or typ != None): query += "WHERE " if timefrom != None: query += "Time >= %s AND " params.append(timefrom) if timeto != None: query += "Time <= %s AND " params.append(timeto) if origin != None: query += "Origin = %s AND " params.append(origin) if typ != None: query += "Type = %s AND " params.append(typ) query = query.strip("AND ") query += " " if order != None and order[0] != None: if order[1]: desc = "DESC " else: desc = "ASC " if order[0] == "time": query += "ORDER BY Time " + desc elif order[0] == "type": query += "ORDER BY Type " + desc elif order[0] == "origin": query += "ORDER BY Origin " + desc elif order[0] == "id": query += "ORDER BY Id" + desc if count != None: query += "LIMIT %s " params.append(count) cursor.execute(query, tuple(params)) for row in cursor: results.append({ "Id": str(row[0]), "Time": str(row[1]), "Origin": str(row[2]), "Message": str(row[3]), "Type": str(row[4]) }) return results pass def insertMetric(time, origin, key, value): cursor = database.cursor() cursor.execute( "INSERT INTO log_metrics (Time, Origin, Key, Value) VALUES (%s, %s, %s, %s) RETURNING Id, Time, Origin, Key, Value", (time, origin, key, value)) row = cursor.fetchone() cursor.close() database.commit() return { "Id": 
str(row[0]), "Time": str(row[1]), "Origin": str(row[2]), "Key": str(row[3]), "Value": str(row[4]) } def insertMessage(time, origin, message, typ): cursor = database.cursor() cursor.execute( "INSERT INTO log_messages (Time, Origin, Message, Type) VALUES (%s, %s, %s, %s) RETURNING Id, Time, Origin, Message, Type", (int(float(time)), origin, message, int(typ))); row = cursor.fetchone() cursor.close() database.commit() return { "Id": str(row[0]), "Time": str(row[1]), "Origin": str(row[2]), "Message": str(row[3]), "Type": str(row[4]) }
# -*- coding: utf-8 -*-
from base64 import b64encode
from hashlib import sha1
import os
import socket
import ssl

from ws4py import WS_KEY, WS_VERSION
from ws4py.exc import HandshakeError
from ws4py.websocket import WebSocket
from ws4py.compat import urlsplit

__all__ = ['WebSocketBaseClient']


class WebSocketBaseClient(WebSocket):
    def __init__(self, url, protocols=None, extensions=None,
                 heartbeat_freq=None, ssl_options=None, headers=None):
        """
        A websocket client that implements :rfc:`6455` and provides a simple
        interface to communicate with a websocket server.

        This class works on its own but will block if not run in
        its own thread.

        When an instance of this class is created, a :py:mod:`socket`
        is created. If the connection is a TCP socket,
        the nagle's algorithm is disabled.

        The address of the server will be extracted from the given
        websocket url.

        The websocket key is randomly generated, reset the
        `key` attribute if you want to provide yours.

        For instance to create a TCP client:

        .. code-block:: python

           >>> from websocket.client import WebSocketBaseClient
           >>> ws = WebSocketBaseClient('ws://localhost/ws')

        Here is an example for a TCP client over SSL:

        .. code-block:: python

           >>> from websocket.client import WebSocketBaseClient
           >>> ws = WebSocketBaseClient('wss://localhost/ws')

        Finally an example of a Unix-domain connection:

        .. code-block:: python

           >>> from websocket.client import WebSocketBaseClient
           >>> ws = WebSocketBaseClient('ws+unix:///tmp/my.sock')

        Note that in this case, the initial Upgrade request
        will be sent to ``/``. You may need to change this
        by setting the resource explicitely before connecting:

        .. code-block:: python

           >>> from websocket.client import WebSocketBaseClient
           >>> ws = WebSocketBaseClient('ws+unix:///tmp/my.sock')
           >>> ws.resource = '/ws'
           >>> ws.connect()

        You may provide extra headers by passing a list of tuples
        which must be unicode objects.
        """
        self.url = url
        self.host = None
        self.scheme = None
        self.port = None
        self.unix_socket_path = None
        self.resource = None
        self.ssl_options = ssl_options or {}
        self.extra_headers = headers or []

        self._parse_url()

        if self.unix_socket_path:
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
        else:
            # Let's handle IPv4 and IPv6 addresses.
            # Simplified from CherryPy's code.
            try:
                family, socktype, proto, canonname, sa = socket.getaddrinfo(
                    self.host, self.port, socket.AF_INET,
                    socket.SOCK_STREAM, 0, socket.AI_PASSIVE)[0]
            except socket.gaierror:
                family = socket.AF_INET
                if self.host.startswith('::'):
                    family = socket.AF_INET6
                socktype = socket.SOCK_STREAM
                proto = 0
                canonname = ""
                sa = (self.host, self.port, 0, 0)

            sock = socket.socket(family, socktype, proto)
            # Disable Nagle for lower latency on small frames.
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            if hasattr(socket, 'AF_INET6') and family == socket.AF_INET6 and \
                    self.host.startswith('::'):
                try:
                    sock.setsockopt(socket.IPPROTO_IPV6,
                                    socket.IPV6_V6ONLY, 0)
                except (AttributeError, socket.error):
                    pass

        WebSocket.__init__(self, sock, protocols=protocols,
                           extensions=extensions,
                           heartbeat_freq=heartbeat_freq)

        # Clients must mask outgoing frames and must not expect masked
        # frames from the server (RFC 6455 section 5.3).
        self.stream.always_mask = True
        self.stream.expect_masking = False
        self.key = b64encode(os.urandom(16))

    # Adapted from: https://github.com/liris/websocket-client/blob/master/websocket.py#L105
    def _parse_url(self):
        """
        Parses a URL which must have one of the following forms:

        - ws://host[:port][path]
        - wss://host[:port][path]
        - ws+unix:///path/to/my.socket

        In the first two cases, the ``host`` and ``port``
        attributes will be set to the parsed values. If no port
        is explicitely provided, it will be either 80 or 443
        based on the scheme. Also, the ``resource`` attribute is
        set to the path segment of the URL (alongside any querystring).

        In addition, if the scheme is ``ws+unix``, the
        ``unix_socket_path`` attribute is set to the path to
        the Unix socket while the ``resource`` attribute is
        set to ``/``.
        """
        # Python 2.6.1 and below don't parse ws or wss urls properly.
        # netloc is empty.
        # See: https://github.com/Lawouach/WebSocket-for-Python/issues/59
        scheme, url = self.url.split(":", 1)

        parsed = urlsplit(url, scheme="http")
        if parsed.hostname:
            self.host = parsed.hostname
        elif '+unix' in scheme:
            self.host = 'localhost'
        else:
            # The original passed a tuple to ValueError instead of
            # formatting the message.
            raise ValueError("Invalid hostname from: %s" % self.url)

        if parsed.port:
            self.port = parsed.port

        if scheme == "ws":
            if not self.port:
                self.port = 80
        elif scheme == "wss":
            if not self.port:
                self.port = 443
        elif scheme in ('ws+unix', 'wss+unix'):
            pass
        else:
            raise ValueError("Invalid scheme: %s" % scheme)

        if parsed.path:
            resource = parsed.path
        else:
            resource = "/"

        if '+unix' in scheme:
            self.unix_socket_path = resource
            resource = '/'

        if parsed.query:
            resource += "?" + parsed.query

        self.scheme = scheme
        self.resource = resource

    @property
    def bind_addr(self):
        """
        Returns the Unix socket path if or a tuple
        ``(host, port)`` depending on the initial
        URL's scheme.
        """
        return self.unix_socket_path or (self.host, self.port)

    def close(self, code=1000, reason=''):
        """
        Initiate the closing handshake with the server.
        """
        if not self.client_terminated:
            self.client_terminated = True
            self._write(self.stream.close(code=code,
                                          reason=reason).single(mask=True))

    def connect(self):
        """
        Connects this websocket and starts the upgrade handshake
        with the remote endpoint.
        """
        if self.scheme == "wss":
            # default port is now 443; upgrade self.sender to send ssl
            self.sock = ssl.wrap_socket(self.sock, **self.ssl_options)
            self._is_secure = True

        self.sock.connect(self.bind_addr)

        self._write(self.handshake_request)

        response = b''
        end_of_headers = b'\r\n\r\n'
        while True:
            # "chunk" instead of "bytes": don't shadow the builtin.
            chunk = self.sock.recv(128)
            if not chunk:
                break
            response += chunk
            if end_of_headers in response:
                break

        if not response:
            self.close_connection()
            raise HandshakeError("Invalid response")

        headers, _, body = response.partition(end_of_headers)
        response_line, _, headers = headers.partition(b'\r\n')

        try:
            self.process_response_line(response_line)
            self.protocols, self.extensions = \
                self.process_handshake_header(headers)
        except HandshakeError:
            self.close_connection()
            raise

        self.handshake_ok()
        if body:
            self.process(body)

    @property
    def handshake_headers(self):
        """
        List of headers appropriate for the upgrade
        handshake.
        """
        headers = [
            ('Host', '%s:%s' % (self.host, self.port)),
            ('Connection', 'Upgrade'),
            ('Upgrade', 'websocket'),
            ('Sec-WebSocket-Key', self.key.decode('utf-8')),
            ('Sec-WebSocket-Version', str(max(WS_VERSION)))
        ]

        if self.protocols:
            headers.append(('Sec-WebSocket-Protocol',
                            ','.join(self.protocols)))

        if self.extra_headers:
            headers.extend(self.extra_headers)

        if not any(x for x in headers if x[0].lower() == 'origin'):
            scheme, url = self.url.split(":", 1)
            parsed = urlsplit(url, scheme="http")
            # Unlike the original, computing the default Origin no longer
            # mutates self.host as a side effect of reading a property.
            hostname = parsed.hostname or 'localhost'
            origin = scheme + '://' + hostname
            if parsed.port:
                origin = origin + ':' + str(parsed.port)
            headers.append(('Origin', origin))

        return headers

    @property
    def handshake_request(self):
        """
        Prepare the request to be sent for the upgrade handshake.
        """
        headers = self.handshake_headers
        request = [("GET %s HTTP/1.1" % self.resource).encode('utf-8')]
        for header, value in headers:
            request.append(("%s: %s" % (header, value)).encode('utf-8'))
        request.append(b'\r\n')
        return b'\r\n'.join(request)

    def process_response_line(self, response_line):
        """
        Ensure that we received a HTTP `101` status code in
        response to our request and if not raises
        :exc:`HandshakeError`.
        """
        protocol, code, status = response_line.split(b' ', 2)
        if code != b'101':
            raise HandshakeError("Invalid response status: %s %s"
                                 % (code, status))

    def process_handshake_header(self, headers):
        """
        Read the upgrade handshake's response headers and
        validate them against :rfc:`6455`.
        """
        protocols = []
        extensions = []

        headers = headers.strip()

        for header_line in headers.split(b'\r\n'):
            header, value = header_line.split(b':', 1)
            header = header.strip().lower()
            # NOTE(review): lowercasing every value also lowercases the
            # base64 Sec-WebSocket-Accept challenge before comparison,
            # weakening the check slightly; kept for compatibility.
            value = value.strip().lower()

            if header == b'upgrade' and value != b'websocket':
                raise HandshakeError("Invalid Upgrade header: %s" % value)
            elif header == b'connection' and value != b'upgrade':
                raise HandshakeError("Invalid Connection header: %s" % value)
            elif header == b'sec-websocket-accept':
                match = b64encode(sha1(self.key + WS_KEY).digest())
                if value != match.lower():
                    raise HandshakeError("Invalid challenge response: %s"
                                         % value)
            elif header == b'sec-websocket-protocol':
                # The header value is a comma-separated list; the original
                # did ','.join(value), which joined the *characters* of the
                # value (and raises TypeError on Python 3 bytes).
                protocols = [p.strip() for p in value.split(b',')]
            elif header == b'sec-websocket-extensions':
                extensions = [e.strip() for e in value.split(b',')]

        return protocols, extensions

    def handshake_ok(self):
        self.opened()
import math
import os

import pygame
import thorpy

# The procedural tilers (BeachTiler/BaseTiler/RoundTiler) need
# pygame.surfarray, i.e. numpy.  Without numpy only pre-rendered tiles
# loaded from disk through LoadTiler are available.
has_surfarray = False
try:
    from PyWorld2D.rendering.tilers.beachtiler import BeachTiler
    from PyWorld2D.rendering.tilers.basetiler import BaseTiler
    from PyWorld2D.rendering.tilers.roundtiler import RoundTiler
    from PyWorld2D.rendering.tilers.loadtiler import LoadTiler
    has_surfarray = True
except ImportError:  # was a bare "except:"; only import failure is expected
    from PyWorld2D.rendering.tilers.loadtiler import LoadTiler


def get_mixed_tiles(img1, img2, alpha_img_2):
    """Return a copy of img1 with img2 blended on top at the given alpha."""
    base = img1.copy()
    overlay = img2.copy()
    overlay.set_alpha(alpha_img_2)
    base.blit(overlay, (0, 0))
    return base


def get_shifted_tiles(img, nframes, dx=0, dy=0, reverse=False, sin=True):
    """Return nframes frames of img shifted with wrap-around.

    With sin=True the offset follows one sine period of amplitude (dx, dy)
    over the animation; otherwise it grows linearly by (dx, dy) per frame.
    With reverse=True the sequence is mirrored (without duplicating the
    endpoints) to produce a ping-pong loop.
    """
    r = img.get_rect()
    w, h = r.size
    images = []
    for i in range(nframes):
        if sin:
            delta_x = dx * math.sin(2. * math.pi * i / float(nframes))
            delta_y = dy * math.sin(2. * math.pi * i / float(nframes))
        else:
            delta_x = i * dx
            delta_y = i * dy
        result = pygame.Surface(r.size)
        # Blit four shifted copies so the moving tile wraps around its
        # own rect on both axes.
        xsgn = -1 if delta_x > 0 else 1
        ysgn = -1 if delta_y > 0 else 1
        result.blit(img, r.move(delta_x, delta_y))
        result.blit(img, r.move(delta_x, delta_y + ysgn * h))
        result.blit(img, r.move(delta_x + xsgn * w, delta_y))
        result.blit(img, r.move(delta_x + xsgn * w, delta_y + ysgn * h))
        images.append(result)
    if reverse:
        images += images[::-1][1:-1]
    return images


def build_tiles(img_fullsize, sizes, nframes, dx_divider=0, dy_divider=0,
                reverse=False, sin=True, colorkey=None):
    """Returns a list of list of images on the form : imgs[size][frame]"""
    imgs = []
    for size in sizes:
        # smoothscale is important here, otherwise FAST should be always True
        img = pygame.transform.smoothscale(img_fullsize, (size,) * 2)
        dx = int(size / dx_divider) if dx_divider else 0
        dy = int(size / dy_divider) if dy_divider else 0
        imgs.append(get_shifted_tiles(img, nframes, dx, dy, reverse, sin))
    if colorkey:
        for tiles in imgs:
            for img in tiles:
                img.set_colorkey(colorkey)
    return imgs


def build_color_tiles(color, sizes, nframes, reverse=False, sin=True):
    """Like build_tiles but for a flat color instead of a source image."""
    imgs = []
    for size in sizes:
        img = pygame.Surface((size,) * 2)
        img.fill(color)
        imgs.append(get_shifted_tiles(img, nframes, 0, 0, reverse, sin))
    return imgs


def get_radiuses(nframes, initial_value, increment, reverse=False, sin=True):
    """Return one integer radius per frame, growing by increment (optionally
    modulated by a sine over the animation period)."""
    values = []
    current = initial_value if sin else 0
    for i in range(nframes):
        if sin:
            delta = increment * math.sin(2. * math.pi * i / float(nframes))
        else:
            delta = increment
        current += delta
        values.append(int(current))
    if reverse:
        # Note: unlike get_shifted_tiles, the mirrored slice *replaces*
        # the sequence here (historical behavior, kept as-is).
        values = values[::-1][1:-1]
    return values


def _check_frame_counts(grasses, waters):
    """Assert both tile sets share zoom-level and frame counts; return
    (nzoom, nframes)."""
    nzoom = len(grasses)
    assert nzoom == len(waters)  # same number of zoom levels
    nframes = len(grasses[0])
    for z in range(nzoom):
        assert nframes == len(waters[z])  # same number of frames
    return nzoom, nframes


def build_tilers(grasses, waters, radius_divider, use_beach_tiler):
    """Build one tiler per (zoom, frame): imgs are recomputed for each."""
    nzoom, nframes = _check_frame_counts(grasses, waters)
    tilers = [[None for n in range(nframes)] for z in range(nzoom)]
    for z in range(nzoom):
        cell_size = grasses[z][0].get_width()
        radius = cell_size // radius_divider
        for n in range(nframes):
            if use_beach_tiler:
                tiler = BeachTiler(grasses[z][n], waters[z][n])
                tiler.make(size=(cell_size,) * 2, radius=radius)
            else:
                tiler = BaseTiler(grasses[z][n])
                tiler.make(size=(cell_size,) * 2, radius=0)
            tilers[z][n] = tiler
    return tilers


def build_static_tilers(grasses, waters, radius_divider, use_beach_tiler):
    """Build one tiler per zoom level and reuse it for every frame."""
    nzoom, nframes = _check_frame_counts(grasses, waters)
    tilers = [[None for n in range(nframes)] for z in range(nzoom)]
    for z in range(nzoom):
        cell_size = grasses[z][0].get_width()
        radius = cell_size // radius_divider
        if use_beach_tiler:
            tiler = BeachTiler(grasses[z][0], waters[z][0])
            tiler.make(size=(cell_size,) * 2, radius=radius)
        else:
            tiler = BaseTiler(grasses[z][0])
            tiler.make(size=(cell_size,) * 2, radius=0)
        for n in range(nframes):
            tilers[z][n] = tiler
    return tilers


def build_tilers_fast(grasses, waters, radius_divider, use_beach_tiler):
    """Build full tilers at zoom 0 only, then derive the other zoom levels
    by scaling the zoom-0 images (faster, lower quality)."""
    nzoom, nframes = _check_frame_counts(grasses, waters)
    tilers = [[None for n in range(nframes)] for z in range(nzoom)]
    cell_size = grasses[0][0].get_width()
    radius = cell_size // radius_divider
    for n in range(nframes):
        if use_beach_tiler:
            tiler = BeachTiler(grasses[0][n], waters[0][n])
            tiler.make(size=(cell_size,) * 2, radius=radius)
        else:
            tiler = BaseTiler(grasses[0][n])
            tiler.make(size=(cell_size,) * 2, radius=0)
        tilers[0][n] = tiler
    if nzoom > 1:
        for z in range(1, nzoom):
            for n in range(nframes):
                if use_beach_tiler:
                    tiler = BeachTiler(grasses[z][n], waters[z][n])
                else:
                    tiler = BaseTiler(grasses[z][n])
                size = grasses[z][n].get_size()
                ref = tilers[0][n]
                for key in ref.imgs:
                    tiler.imgs[key] = pygame.transform.scale(ref.imgs[key],
                                                             size)
                tilers[z][n] = tiler
    return tilers


def load_tilers_dynamic(i, grasses, waters, folder):
    """Load pre-rendered animated tilers (one per zoom and frame) from disk."""
    nzoom, nframes = _check_frame_counts(grasses, waters)
    tilers = [[None for n in range(nframes)] for z in range(nzoom)]
    for z in range(nzoom):
        # Could be heavily optimized: each image could be loaded once
        # instead of once per zoom level.
        cell_size = grasses[z][0].get_width()
        for n in range(nframes):
            basename = os.path.join(folder, str(i) + "_" + str(n) + "_")
            tilers[z][n] = LoadTiler(basename, (cell_size,) * 2)
    return tilers


def load_tilers_static(i, grasses, waters, folder):
    """Load pre-rendered static tilers from disk; frame 0 is reused for
    every frame."""
    nzoom, nframes = _check_frame_counts(grasses, waters)
    tilers = [[None for n in range(nframes)] for z in range(nzoom)]
    for z in range(nzoom):
        # Could be heavily optimized: each image could be loaded once
        # instead of once per zoom level.
        cell_size = grasses[z][0].get_width()
        basename = os.path.join(folder, str(i) + "_" + str(0) + "_")
        tiler = LoadTiler(basename, (cell_size,) * 2)
        for n in range(nframes):
            tilers[z][n] = tiler
    return tilers


def get_material_couples(materials, radius_divider, fast, use_beach_tiler,
                         load_tilers):
    """Sort materials by max height and build one MaterialCouple per pair of
    neighbours in that order."""
    materials.sort(key=lambda x: x.hmax)
    couples = []
    imgs_zoom0_mat0 = materials[0].imgs[0]
    nframes = len(imgs_zoom0_mat0)
    max_cell_size = imgs_zoom0_mat0[0].get_width()
    for i in range(len(materials) - 1):
        print("     Building tilers for couple", i)
        assert nframes == len(materials[i + 1].imgs[0])
        couple = MaterialCouple(i, materials[i], materials[i + 1],
                                radius_divider, max_cell_size, fast,
                                use_beach_tiler, load_tilers)
        couples.append(couple)
    return couples


def get_couple(h, couples):
    """Return the couple covering height h (first couple for h < 0, last
    couple when h exceeds every threshold)."""
    if h < 0.:
        return couples[0]
    for couple in couples:
        if couple.grass.hmax > h:
            return couple
    return couples[-1]


class Material:
    """A terrain material: a name, its maximum height hmax, its animation
    images imgs[zoom][frame] and whether it is static."""

    def __init__(self, name, hmax, imgs, static):
        self.name = name
        self.hmax = hmax
        self.imgs = imgs
        self.static = static


class MaterialCouple:
    """A pair of adjacent materials plus the tilers used to draw the
    transition between them."""

    def __init__(self, i, material1, material2, radius_divider,
                 max_cell_size, fast, use_beach_tiler, load_tilers):
        if not has_surfarray and not load_tilers:
            raise Exception("Numpy was not found, and tilers are not loaded")
        assert material1.hmax != material2.hmax
        # The higher material is the "grass", the lower one the "water".
        if material1.hmax > material2.hmax:
            self.grass, self.water = material1, material2
        else:
            self.grass, self.water = material2, material1
        if load_tilers:
            if material1.static and material2.static:
                self.static = True
                self.tilers = load_tilers_static(i, self.grass.imgs,
                                                 self.water.imgs, load_tilers)
            else:
                self.static = False
                self.tilers = load_tilers_dynamic(i, self.grass.imgs,
                                                  self.water.imgs, load_tilers)
        else:
            build_tilers_static = build_static_tilers
            if fast:
                build_tilers_dynamic = build_tilers_fast
            else:
                build_tilers_dynamic = build_tilers
            if material1.static and material2.static:
                self.static = True
                self.tilers = build_tilers_static(self.grass.imgs,
                                                  self.water.imgs,
                                                  radius_divider,
                                                  use_beach_tiler)
            else:
                self.static = False
                self.tilers = build_tilers_dynamic(self.grass.imgs,
                                                   self.water.imgs,
                                                   radius_divider,
                                                   use_beach_tiler)
        self.transition = self.water.hmax
        self.max_cell_size = max_cell_size

    def get_tilers(self, zoom):
        return self.tilers[zoom]

    def get_cell_size(self, zoom):
        return self.tilers[zoom][0].imgs["c"].get_width()

    def get_all_frames(self, zoom, type_):
        return [self.tilers[zoom][t].imgs[type_]
                for t in range(len(self.tilers[zoom]))]
# sympy/galgebra/manifold.py """ manifold.py defines the Manifold class which allows one to create a vector manifold (manifold defined by vector field of coordinates in embedding vector space) calculate the tangent vectors and derivatives of tangent vectors. Once manifold is created multivector fields can be constructed in the tangent space and all the geometric algebra products and derivatives of the multivector fields calculated. Note that all calculations are done in the embedding space. Future versions of the code will allow manifolds defined purely in terms of a metric. """ from __future__ import print_function from itertools import combinations from os import system import copy from sympy import trigsimp, simplify from sympy.galgebra.ga import MV from sympy.galgebra.debug import oprint from sympy.galgebra.ncutil import linear_expand from sympy.galgebra.printing import find_executable def fct_to_str(fct_names): import sys current_file = open(sys.argv[0], 'r') file_str = current_file.read() current_file.close() if isinstance(fct_names, str): return fct_names fcts_str = '' for fct_name in fct_names: start_def = file_str.find('\ndef ' + fct_name) end_def = file_str.find('\ndef ', start_def + 5) start_class = file_str.find('\nclass ', start_def + 5) end_def = min(end_def, start_class) fcts_str += file_str[start_def:end_def] return fcts_str def VectorComponents(X, basis): (coefs, bases) = linear_expand(X.obj) cdict = {} for (coef, base) in zip(coefs, bases): cdict[str(base)] = coef comp = [] for base in basis: if base in cdict: comp.append(cdict[base]) else: comp.append(0) return comp def FillTemplate(self, template): Nd = 0 var = [] id_old = 0 while True: id_new = template.find('$', id_old + 1) if id_new == -1: break Nd += 1 if Nd % 2 == 0: var.append(template[id_old + 1:id_new]) id_old = id_new var.sort(reverse=True) for v in var: template = template.replace('$' + v + '$', str(eval('self.' 
+ v)))
    return template


class Manifold:
    """A 2-parameter (or n-parameter) manifold embedded via a parametric
    vector function, built on the MV geometric-algebra type.

    Computes, from the embedding ``x(coords)``:
      * tangent basis vectors (``basis``) and their string/coefficient form,
      * metric tensor ``gij``,
      * the full blade basis ``MFbasis`` up to the pseudoscalar ``E``,
      * reciprocal basis ``rbasis`` (un-normalized; divide by ``E_sq``),
      * basis-vector derivatives ``dbasis`` and the intrinsic gradient
        operator ``grad``.
    """

    def __init__(self, x, coords, debug=False, I=None):
        """
        coords: list of coordinate variables
        x: vector fuction of coordinate variables (parametric surface)
        """
        # I: pseudoscalar of the embedding space (used for the surface
        # normal in Plot2DSurface); may be None if plotting is not needed.
        self.I = I
        self.x = x
        self.coords = coords

        # Tangent basis: one vector x.diff(u) per coordinate, plus a
        # string -> coefficient map of each tangent vector's components
        # in the embedding basis.
        self.basis = []
        self.basis_str = []
        self.embedded_basis = []
        for u in coords:
            tv = x.diff(u)
            self.basis.append(tv)
            (coefs, bases) = linear_expand(tv.obj)
            tc = {}
            for (coef, base) in zip(coefs, bases):
                str_base = str(base)
                tc[str_base] = coef
                if str_base not in self.embedded_basis:
                    self.embedded_basis.append(str_base)
            self.basis_str.append(tc)

        # Metric tensor g_ij = e_i . e_j (simplified).
        self.gij = []
        for base1 in self.basis:
            tmp = []
            for base2 in self.basis:
                tmp.append(simplify(trigsimp((base1 | base2).scalar())))
            self.gij.append(tmp)

        # Fill missing embedding-basis components with 0 so every tangent
        # vector's component dict has the same keys.
        for tv in self.basis_str:
            for base in self.embedded_basis:
                if base not in tv:
                    tv[base] = 0

        # index[k] enumerates the k-blade index tuples (index[0] is the
        # empty tuple for the scalar grade).
        self.dim = len(self.basis)
        indexes = tuple(range(self.dim))
        self.index = [()]
        for i in indexes:
            self.index.append(tuple(combinations(indexes, i + 1)))
        self.index = tuple(self.index)

        # Blade basis by grade: scalar, vectors, then wedge products.
        self.MFbasis = [[MV.ONE], self.basis]

        for igrade in self.index[2:]:
            grade = []
            for iblade in igrade:
                blade = MV(1, 'scalar')
                for ibasis in iblade:
                    blade ^= self.basis[ibasis]
                blade = blade.trigsimp(deep=True, recursive=True)
                grade.append(blade)
            self.MFbasis.append(grade)
        # E: pseudoscalar of the manifold (highest-grade blade).
        self.E = self.MFbasis[-1][0]
        self.E_sq = trigsimp((self.E * self.E).scalar(), deep=True, recursive=True)

        # Reciprocal basis from the grade-(n-1) blades, with alternating
        # sign; note rbasis is NOT yet divided by E_sq (done at use sites).
        duals = copy.copy(self.MFbasis[-2])

        duals.reverse()
        sgn = 1
        self.rbasis = []
        for dual in duals:
            recpv = (sgn * dual * self.E).trigsimp(deep=True, recursive=True)
            self.rbasis.append(recpv)
            sgn = -sgn

        # dbasis[i][j] = d(e_i)/d(coord_j), used for connection terms.
        self.dbasis = []

        for base in self.basis:
            dbase = []
            for coord in self.coords:
                d = base.diff(coord).trigsimp(deep=True, recursive=True)
                dbase.append(d)
            self.dbasis.append(dbase)

        # Component map of the embedding itself (for the plot template).
        self.surface = {}
        (coefs, bases) = linear_expand(self.x.obj)

        for (coef, base) in zip(coefs, bases):
            self.surface[str(base)] = coef

        # Build the intrinsic gradient operator as a specially-marked MV.
        self.grad = MV()
        self.grad.is_grad = True
        self.grad.blade_rep = True
        self.grad.igrade = 1
        self.grad.rcpr_bases_MV = []
        for rbase in self.rbasis:
            self.grad.rcpr_bases_MV.append(rbase / self.E_sq)
        self.grad.rcpr_bases_MV = tuple(self.grad.rcpr_bases_MV)
        self.grad.coords = self.coords
        self.grad.norm = self.E_sq
        self.grad.connection = {}

        if debug:
            oprint('x', self.x, 'coords', self.coords,
                   'basis vectors', self.basis, 'index', self.index,
                   'basis blades', self.MFbasis, 'E', self.E,
                   'E**2', self.E_sq, '*basis', duals,
                   'rbasis', self.rbasis,
                   'basis derivatives', self.dbasis,
                   'surface', self.surface,
                   'basis strings', self.basis_str,
                   'embedding basis', self.embedded_basis,
                   'metric tensor', self.gij)

    def Basis(self):
        """Return the tangent basis vectors as a tuple."""
        return tuple(self.basis)

    def Grad(self, F):  # Intrisic Derivative
        """Intrinsic derivative: sum of rbasis_i * dF/dcoord_i, normalized
        by E_sq (rbasis is stored un-normalized)."""
        dF = 0
        for (rbase, coord) in zip(self.rbasis, self.coords):
            dF += rbase * F.diff(coord)
        dF = dF.simplify()
        dF = dF / self.E_sq
        return dF

    def D(self, F):  # Covariant Derivative
        """Covariant derivative: intrinsic derivative projected back onto
        the manifold."""
        dF = self.Grad(F)
        return self.Proj(dF)

    def S(self, a):  # Shape Tensor
        # Not implemented; placeholder returns None.
        return

    def Proj(self, F):
        """Project multivector F onto the manifold via (F < E) * E / E**2."""
        PF = (F < self.E) * self.E
        PF = PF.simplify()
        PF = PF.trigsimp(deep=True, recursive=True)
        return (PF / self.E_sq).simplify()

    def Reject(self, F):
        """Component of F orthogonal to the manifold."""
        return (F - self.Proj(F)).simplify()

    def DD(self, v, f, opstr=False):
        """Directional derivative of f along tangent vector v.

        If opstr is True, also return a string form of the operator.
        """
        # Components of v in the (normalized) reciprocal basis.
        mf_comp = []
        for e in self.rbasis:
            mf_comp.append((v | e).scalar() / self.E_sq)
        result = MV()
        op = ''
        for (coord, comp) in zip(self.coords, mf_comp):
            result += comp * (f.diff(coord))
            if opstr:
                op += '(' + str(comp) + ')D{' + str(coord) + '}+'
        if opstr:
            # op[:-1] drops the trailing '+'.
            return str(result), op[:-1]
        return result

    def Plot2DSurface(self, u_range, v_range, surf=True, grid=True, tan=1.0,
                      scalar_field=None, skip=[1, 1], fct_def=None):
        # NOTE(review): `skip=[1, 1]` is a mutable default argument; it is
        # only read here, but callers should not rely on mutating it.
        """Write a mayavi2 plotting script for a 2-coordinate manifold to
        'manifold_surf.py' and launch mayavi2 on it (no-op if the manifold
        is not 2-dimensional or mayavi2 is not on PATH).
        """
        # $name$ placeholders below are substituted by FillTemplate from
        # this object's attributes.
        plot_template = \
"""
from numpy import mgrid,shape,swapaxes,zeros,log,exp,sin,cos,tan
$fct_def$
eps = 1.0e-6
u_r = $u_range$
v_r = $v_range$
$coords$ = mgrid[u_r[0]:u_r[1]+eps:(u_r[1]-u_r[0])/float(u_r[2]-1),\\
        v_r[0]:v_r[1]+eps:(v_r[1]-v_r[0])/float(v_r[2]-1)]
X = $surface$
scal_tan = $tan$
x = X['ex']
y = X['ey']
z = X['ez']
du = $basis_str[0]$
dv = $basis_str[1]$
Zero = zeros(shape(x))
if scal_tan > 0.0:
    du_x = Zero+du['ex']
    du_y = Zero+du['ey']
    du_z = Zero+du['ez']
    dv_x = Zero+dv['ex']
    dv_y = Zero+dv['ey']
    dv_z = Zero+dv['ez']
f = $scalar_field$
n = $n$
skip = $skip$
su = skip[0]
sv = skip[1]
if f[0] != None:
    dn_x = f[0]*n[0]
    dn_y = f[0]*n[1]
    dn_z = f[0]*n[2]
from mayavi.mlab import plot3d,quiver3d,mesh,figure
figure(bgcolor=(1.0,1.0,1.0))
if $surf$:
    mesh(x,y,z,colormap="gist_earth")
if $grid$:
    for i in range(shape(u)[0]):
        plot3d(x[i,],y[i,],z[i,],line_width=1.0,color=(0.0,0.0,0.0),tube_radius=None)
    xr = swapaxes(x,0,1)
    yr = swapaxes(y,0,1)
    zr = swapaxes(z,0,1)
    for i in range(shape(u)[1]):
        plot3d(xr[i,],yr[i,],zr[i,],line_width=1.0,color=(0.0,0.0,0.0),tube_radius=None)
if scal_tan > 0.0:
    quiver3d(x[::su,::sv],y[::su,::sv],z[::su,::sv],\\
             du_x[::su,::sv],du_y[::su,::sv],du_z[::su,::sv],scale_factor=scal_tan,\\
             line_width=1.0,color=(0.0,0.0,0.0),scale_mode='vector',mode='arrow',resolution=16)
    quiver3d(x[::su,::sv],y[::su,::sv],z[::su,::sv],\\
             dv_x[::su,::sv],dv_y[::su,::sv],dv_z[::su,::sv],scale_factor=scal_tan,\\
             line_width=1.0,color=(0.0,0.0,0.0),scale_mode='vector',mode='arrow',resolution=16)
if f[0] != None:
    quiver3d(x[::su,::sv],y[::su,::sv],z[::su,::sv],\\
             dn_x[::su,::sv],dn_y[::su,::sv],dn_z[::su,::sv],\\
             line_width=1.0,color=(0.0,0.0,0.0),scale_mode='none',mode='cone',\\
             resolution=16,opacity=0.5)
"""
        # Plotting only supports 2-parameter surfaces.
        if len(self.coords) != 2:
            return
        self.skip = skip
        self.surf = surf
        self.grid = grid
        self.tan = tan
        if fct_def is None:
            self.fct_def = ' '
        else:
            self.fct_def = fct_to_str(fct_def)
        self.u_range = u_range
        self.v_range = v_range
        self.scalar_field = [scalar_field]
        print(self.I, '\n', self.basis[0], '\n', self.basis[1])
        # Surface normal from the pseudoscalar and the two tangent vectors.
        self.normal = -self.I * (self.basis[0] ^ self.basis[1])
        self.n = VectorComponents(self.normal, ['ex', 'ey', 'ez'])
        msurf = open('manifold_surf.py', 'w')
        plot_template = FillTemplate(self, plot_template)
        msurf.write(plot_template)
        msurf.close()
        mayavi2 = find_executable('mayavi2')
        if mayavi2 is None:
            return
        # Launch the viewer in the background on the generated script.
        system(mayavi2 + ' manifold_surf.py &')
        return
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings

from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling

from ... import models as _models

T = TypeVar('T')
# Optional callback applied to every response: (raw response, deserialized
# result, response headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]

class BastionHostsOperations:
    """BastionHostsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_03_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def _delete_initial(
        self,
        resource_group_name: str,
        bastion_host_name: str,
        **kwargs: Any
    ) -> None:
        # Fires the initial DELETE request of the long-running operation;
        # begin_delete wraps this in a poller.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Any status outside the accepted set is mapped to an ARM error.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'}  # type: ignore

    async def begin_delete(
        self,
        resource_group_name: str,
        bastion_host_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified Bastion Host.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param bastion_host_name: The name of the Bastion Host.
        :type bastion_host_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial request; cls is overridden so
            # the raw pipeline response is captured for the poller.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                bastion_host_name=bastion_host_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume an in-flight operation from a saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'}  # type: ignore

    async def get(
        self,
        resource_group_name: str,
        bastion_host_name: str,
        **kwargs: Any
    ) -> "_models.BastionHost":
        """Gets the specified Bastion Host.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param bastion_host_name: The name of the Bastion Host.
        :type bastion_host_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BastionHost, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_03_01.models.BastionHost
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BastionHost"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('BastionHost', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'}  # type: ignore

    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        bastion_host_name: str,
        parameters: "_models.BastionHost",
        **kwargs: Any
    ) -> "_models.BastionHost":
        # Fires the initial PUT request of the long-running create/update;
        # begin_create_or_update wraps this in a poller.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BastionHost"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'BastionHost')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both success codes carry a BastionHost body.
        if response.status_code == 200:
            deserialized = self._deserialize('BastionHost', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('BastionHost', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'}  # type: ignore

    async def begin_create_or_update(
        self,
        resource_group_name: str,
        bastion_host_name: str,
        parameters: "_models.BastionHost",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.BastionHost"]:
        """Creates or updates the specified Bastion Host.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param bastion_host_name: The name of the Bastion Host.
        :type bastion_host_name: str
        :param parameters: Parameters supplied to the create or update Bastion Host operation.
        :type parameters: ~azure.mgmt.network.v2020_03_01.models.BastionHost
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either BastionHost or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.BastionHost]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BastionHost"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial request; cls is overridden so
            # the raw pipeline response is captured for the poller.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                bastion_host_name=bastion_host_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('BastionHost', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume an in-flight operation from a saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}'}  # type: ignore

    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.BastionHostListResult"]:
        """Lists all Bastion Hosts in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BastionHostListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.BastionHostListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BastionHostListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: build the full URL and query string.
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: the service returns the complete URL.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('BastionHostListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/bastionHosts'}  # type: ignore

    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.BastionHostListResult"]:
        """Lists all Bastion Hosts in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BastionHostListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.BastionHostListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BastionHostListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: build the full URL and query string.
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: the service returns the complete URL.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('BastionHostListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts'}  # type: ignore
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
from marvin.codes import FAILED
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import Account, VirtualMachine, ServiceOffering, Host, Cluster
from marvin.lib.common import get_zone, get_domain, get_test_template
from marvin.lib.utils import cleanup_resources
from nose.plugins.attrib import attr


class TestDeployVmWithVariedPlanners(cloudstackTestCase):
    """ Test to create services offerings for deployment planners
        - firstfit, userdispersing
    """

    @classmethod
    def setUpClass(cls):
        """Create the test account and cache zone/template/host/cluster info."""
        testClient = super(TestDeployVmWithVariedPlanners, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.hypervisor = testClient.getHypervisorInfo()
        cls.template = get_test_template(
            cls.apiclient,
            cls.zone.id,
            cls.hypervisor
        )
        if cls.template == FAILED:
            assert False, "get_test_template() failed to return template"

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["template"] = cls.template.id
        cls.services["zoneid"] = cls.zone.id

        cls.account = Account.create(
            cls.apiclient,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls.hosts = Host.list(cls.apiclient, type='Routing')
        cls.clusters = Cluster.list(cls.apiclient)
        cls.cleanup = [
            cls.account
        ]

    def _cluster_of_host(self, hostid):
        """Return the clusterid of the routing host with the given id.

        BUGFIX: the original used ``filter(...)[0]`` which raises
        ``TypeError`` on Python 3 because ``filter`` returns an iterator,
        not a subscriptable list.

        :raises StopIteration: if no host matches (same failure mode as the
            original's IndexError — the test data is malformed).
        """
        return next(h for h in self.hosts if h.id == hostid).clusterid

    def _pod_of_cluster(self, clusterid):
        """Return the podid of the cluster with the given id (see
        _cluster_of_host for the Python 3 ``filter`` fix)."""
        return next(c for c in self.clusters if c.id == clusterid).podid

    @attr(tags=["advanced", "basic", "sg"], required_hardware="false")
    def test_deployvm_firstfit(self):
        """Test to deploy vm with a first fit offering
        """
        # FIXME: How do we know that first fit actually happened?
        self.service_offering_firstfit = ServiceOffering.create(
            self.apiclient,
            self.services["service_offerings"]["tiny"],
            deploymentplanner='FirstFitPlanner'
        )
        self.virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_firstfit.id,
            templateid=self.template.id
        )

        list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s"
            % self.virtual_machine.id
        )
        self.assertEqual(
            isinstance(list_vms, list),
            True,
            "List VM response was not a valid list"
        )
        self.assertNotEqual(
            len(list_vms),
            0,
            "List VM response was empty"
        )
        vm = list_vms[0]
        self.assertEqual(
            vm.state,
            "Running",
            msg="VM is not in Running state"
        )

    @attr(tags=["advanced", "basic", "sg"], required_hardware="false")
    def test_deployvm_userdispersing(self):
        """Test deploy VMs using user dispersion planner
        """
        self.service_offering_userdispersing = ServiceOffering.create(
            self.apiclient,
            self.services["service_offerings"]["tiny"],
            deploymentplanner='UserDispersingPlanner'
        )
        self.virtual_machine_1 = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_userdispersing.id,
            templateid=self.template.id
        )
        self.virtual_machine_2 = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_userdispersing.id,
            templateid=self.template.id
        )

        list_vm_1 = VirtualMachine.list(self.apiclient, id=self.virtual_machine_1.id)
        list_vm_2 = VirtualMachine.list(self.apiclient, id=self.virtual_machine_2.id)
        self.assertEqual(
            isinstance(list_vm_1, list),
            True,
            "List VM response was not a valid list"
        )
        self.assertEqual(
            isinstance(list_vm_2, list),
            True,
            "List VM response was not a valid list"
        )
        vm1 = list_vm_1[0]
        vm2 = list_vm_2[0]
        self.assertEqual(
            vm1.state,
            "Running",
            msg="VM is not in Running state"
        )
        self.assertEqual(
            vm2.state,
            "Running",
            msg="VM is not in Running state"
        )
        vm1clusterid = self._cluster_of_host(vm1.hostid)
        vm2clusterid = self._cluster_of_host(vm2.hostid)
        # Dispersal is best-effort (a single-cluster setup cannot disperse),
        # so only log — do not fail — when both VMs land in one cluster.
        if vm1clusterid == vm2clusterid:
            self.debug("VMs (%s, %s) meant to be dispersed are deployed in the same cluster %s" % (
                vm1.id, vm2.id, vm1clusterid))

    @attr(tags=["advanced", "basic", "sg"], required_hardware="false")
    def test_deployvm_userconcentrated(self):
        """Test deploy VMs using user concentrated planner
        """
        self.service_offering_userconcentrated = ServiceOffering.create(
            self.apiclient,
            self.services["service_offerings"]["tiny"],
            deploymentplanner='UserConcentratedPodPlanner'
        )
        self.virtual_machine_1 = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_userconcentrated.id,
            templateid=self.template.id
        )
        self.virtual_machine_2 = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_userconcentrated.id,
            templateid=self.template.id
        )

        list_vm_1 = VirtualMachine.list(self.apiclient, id=self.virtual_machine_1.id)
        list_vm_2 = VirtualMachine.list(self.apiclient, id=self.virtual_machine_2.id)
        self.assertEqual(
            isinstance(list_vm_1, list),
            True,
            "List VM response was not a valid list"
        )
        self.assertEqual(
            isinstance(list_vm_2, list),
            True,
            "List VM response was not a valid list"
        )
        vm1 = list_vm_1[0]
        vm2 = list_vm_2[0]
        self.assertEqual(
            vm1.state,
            "Running",
            msg="VM is not in Running state"
        )
        self.assertEqual(
            vm2.state,
            "Running",
            msg="VM is not in Running state"
        )
        vm1clusterid = self._cluster_of_host(vm1.hostid)
        vm2clusterid = self._cluster_of_host(vm2.hostid)
        vm1podid = self._pod_of_cluster(vm1clusterid)
        vm2podid = self._pod_of_cluster(vm2clusterid)
        # Concentration is a hard expectation: both VMs must share a pod.
        self.assertEqual(
            vm1podid,
            vm2podid,
            msg="VMs (%s, %s) meant to be pod concentrated are deployed on different pods (%s, %s)" % (vm1.id, vm2.id, vm1clusterid, vm2clusterid)
        )

    @classmethod
    def tearDownClass(cls):
        """Delete the account (and everything under it) created in setUpClass."""
        try:
            cleanup_resources(cls.apiclient, cls.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
""" Foscam camera's motion state support. """ import base64 import logging from asyncio import TimeoutError, coroutine, sleep from collections import deque from json import dumps from typing import List import async_timeout import voluptuous as vol from aiohttp import ClientSession from libpyfoscam.foscam import FoscamError from homeassistant.components.camera import (PLATFORM_SCHEMA) from homeassistant.const import ( CONF_NAME, CONF_USERNAME, CONF_PASSWORD, CONF_PORT, CONF_UNIT_OF_MEASUREMENT, CONF_API_KEY, CONF_HOST, CONF_TIMEOUT, CONF_ICON) from homeassistant.core import callback from homeassistant.helpers import config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import async_call_later _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ['libpyfoscam==1.0'] NUMBER_OF_SNAPS = 3 CONF_KEEP_ON_FOR = 'keep_on_for' CONF_IMAGES_SOURCE_PATH = 'images_source_path' CONF_IMAGES_DESTINATION_PATH = 'images_destination_paths' CONF_HTTP_TIMEOUT = 'http_timeout' CONF_TIME_BETWEEN_SNAPS = 'time_between_snaps' DEFAULT_NAME = 'Foscam Motion Sensor' DEFAULT_PORT = 88 DEFAULT_KEEP_ON_FOR = 5 DEFAULT_TIME_BEETWEEN_SNAPS = 3 DEFAULT_TIMEOUT = (DEFAULT_TIME_BEETWEEN_SNAPS * NUMBER_OF_SNAPS) + 5 DEFAULT_SCAN_INTERVAL = 1 DEFAULT_HTTP_TIMEOUT = 5 FOSCAM_MOTION_DETECTED_STATE = 2 FOSCAM_ARMED_STATE = 1 SENSOR_STATE_UNKNOWN = 'unknown' SENSOR_STATE_IDLE = 'idle' SENSOR_STATE_ARMED = 'armed' SENSOR_STATE_MOTION = 'motion detected' SENSOR_STATE_PERSON = 'person detected' UNKNOWN_OBJECT = 'unknown' PERSON_OBJECT = 'person' URL = "https://dev.sighthoundapi.com/v1/detections?type=face,person" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Required(CONF_PASSWORD): cv.string, vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_IMAGES_DESTINATION_PATH): cv.isdir, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, 
vol.Optional(CONF_KEEP_ON_FOR, default=DEFAULT_KEEP_ON_FOR): cv.positive_int, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, vol.Optional(CONF_HTTP_TIMEOUT, default=DEFAULT_HTTP_TIMEOUT): cv.positive_int, vol.Optional(CONF_TIME_BETWEEN_SNAPS, default=DEFAULT_TIME_BEETWEEN_SNAPS): cv.positive_int, }) @coroutine def async_setup_platform(hass, config, async_add_devices, discovery_info=None): """Set up the Foscam sensor.""" foscam_sensor = FoscamSensor(hass, config) foscam_sensor.start_updating() class FoscamSensor(Entity): """Representation of a Foscam sensor.""" def __init__(self, hass, config): from libpyfoscam import FoscamCamera """Initialize the Foscam sensor.""" self._name = config.get(CONF_NAME) self._unit_of_measurement = config.get(CONF_UNIT_OF_MEASUREMENT) self._icon = config.get(CONF_ICON) self._state = SENSOR_STATE_IDLE self._keep_on_for = config.get(CONF_KEEP_ON_FOR) self._time_between_snaps = config.get(CONF_TIME_BETWEEN_SNAPS) self.hass = hass self.entity_id = 'sensor.foscam_motion' ip = config.get(CONF_HOST) port = config.get(CONF_PORT) username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) self._foscam_client = FoscamCamera(host=ip, port=port, usr=username, pwd=password, verbose=False) self._image_destination_path = config.get(CONF_IMAGES_DESTINATION_PATH) self._person_detector = PersonDetector(api_key=config.get(CONF_API_KEY)) self._previous_snaps = deque(maxlen=3) @property def name(self): """Return the name of the device.""" return self._name @property def state(self): """Return the state of the device.""" return self._state @property def icon(self): """Return the icon to use in the frontend, if any.""" return self._icon @property def unit_of_measurement(self): """Return the unit this state is expressed in.""" return self._unit_of_measurement @callback def start_updating(self, *_): self.hass.loop.create_task(self.async_update_state()) delay = DEFAULT_SCAN_INTERVAL + (5 if 
self._is_motion_detected(self._state) else 0) async_call_later(self.hass, delay, self.start_updating) @staticmethod def _is_motion_detected(foscam_state): return foscam_state == FOSCAM_MOTION_DETECTED_STATE async def async_update_state(self): try: motion_detect_state = await self._get_camera_state() self._previous_snaps.appendleft(await self._get_camera_snapshot()) except (TimeoutError, FoscamError): _LOGGER.exception("Error retrieving state from camera") self._state = SENSOR_STATE_UNKNOWN self.async_schedule_update_ha_state() else: if not self._is_motion_detected(motion_detect_state): new_state = SENSOR_STATE_ARMED if motion_detect_state == FOSCAM_ARMED_STATE else SENSOR_STATE_IDLE self._update_state(new_state) elif self._state not in [SENSOR_STATE_MOTION, SENSOR_STATE_PERSON]: self._update_state(SENSOR_STATE_MOTION) number_of_errors = 0 file_name_idx = 1 person_detected = False snapshots = [s for s in reversed(self._previous_snaps)] for i in range(NUMBER_OF_SNAPS - 1): snap = await self._get_camera_snapshot() snapshots.append(snap) sleep(2) for snap in snapshots: if not person_detected: person_detected = await self._person_detector.process(snap) self._update_state(SENSOR_STATE_PERSON if person_detected else SENSOR_STATE_MOTION) if person_detected: for snapshot in snapshots: _save_image(snapshot, "{destination}/{idx}.jpg".format( destination=self._image_destination_path, idx=file_name_idx)) file_name_idx += 1 snapshots.clear() if number_of_errors == NUMBER_OF_SNAPS: self._update_state(SENSOR_STATE_PERSON) async def _get_camera_state(self): async with async_timeout.timeout(DEFAULT_HTTP_TIMEOUT): code, params = await self.hass.async_add_job(self._foscam_client.get_dev_state) if code > 0: raise FoscamError(code) return int(params['motionDetectAlarm']) def _update_state(self, new_state): if new_state != self._state: self._state = new_state self.async_schedule_update_ha_state() async def _get_camera_snapshot(self): async with 
async_timeout.timeout(DEFAULT_HTTP_TIMEOUT): code, raw_image = await self.hass.async_add_job(self._foscam_client.snap_picture_2) if code > 0: raise FoscamError(code) return raw_image class PersonDetector: """Representation of a motion sensor that is updated via MQTT.""" def __init__(self, api_key): """Initialize the detector.""" self._headers = { "Content-type": "application/json", "X-Access-Token": api_key } async def process(self, raw_image): """Process image.""" image_payload = _get_image_payload(raw_image) async with ClientSession() as session: identified_objects = await self._detect_objects(session, image_payload) person_detected = any(obj in [UNKNOWN_OBJECT, PERSON_OBJECT] for obj in identified_objects) return person_detected async def _detect_objects(self, session: ClientSession, image_payload: dict) -> List[str]: try: async with async_timeout.timeout(DEFAULT_HTTP_TIMEOUT): async with session.post(URL, headers=self._headers, data=dumps(image_payload)) as response: json_response = await response.json() if 'error' in json_response: _LOGGER.error("Received error message when processing image: {error}" .format(error=json_response['error'])) return [UNKNOWN_OBJECT] _LOGGER.debug("Processed image. Response: {response}".format(response=dumps(json_response))) return [obj['type'] for obj in json_response.get('objects')] except (TimeoutError, ValueError): _LOGGER.exception("Failed to process image") return [UNKNOWN_OBJECT] def _get_image_payload(raw_image: bytes) -> dict: encoded_image = base64.b64encode(raw_image) return {'image': encoded_image.decode("utf-8")} def _save_image(raw_image: bytes, file_name: str) -> None: with open(file_name, 'wb') as f: f.write(raw_image)
""" voweltimbre.py - module for comparative analysis of sung timbres in a musical audio context. Evaluate extraction of sung vowel formants compared to a ground truth mark-up usage: (r,p), Z, Z2 = test_vowel_analysis(harmonic=True,tsne=True,dfw=True,perplexity=3) inputs: harmonic - whether to use predominant harmonic analysis (sung melody extraction) tsne - whether to return t-SNE or just DTW spectrum [True] dfw - whether to perform dynamic FREQUENCY warping (dfw) after DTW [True] perplexity - how many near neighbours in t-SNE [3] outputs: (r,p) = pearson correlation coefficient and p-value Z = ground truth (markup) vowel frequencies embedding Z2 = extracted vowel features embedding Author: Michael A. Casey - Bregman Media Labs, Dartmouth College, Hanover, USA Copyright (C) 2015, Dartmouth College, All Rights Reserved License: MIT """ from bregman import distance, testsignal, features from pylab import * import glob, csv, pdb import scipy.stats as ss try: from essentia.standard import * except: print "warning: essentia module not found" try: import dpcore except: print "warning: no dynamic programming module" try: from tsne import bh_sne except: print "warning: no tsne module" def predominant_harmonics(audio=[], filename='DKW_EttaJames_1961.wav', minFreq=200., maxFreq=2000., SR=44100., N=4096, H=2048): """ Extract the predominant harmonic spectrum from a polyphonic mixture inputs: audio - the audio data [empty=use filename] filename - file to analyze ['DKW_EttaJames_1961.wav'] minFreq - minimum frequency for harmonic peaks [200] maxFreq - maximum frequency for harmonic peaks [2000] SR, N, H - fft spectrum parameters outputs: pool - an essentia pool structure containing: freqs - predominant harmonics frequencies mags - predominant harmonics magnitudes """ SR = float(SR) if not len(audio): loader = essentia.standard.MonoLoader(filename=filename) audio = loader() pool = essentia.Pool() predominant_melody = PredominantMelody(frameSize=N, hopSize=H, minFrequency=80.0, 
maxFrequency=20000., guessUnvoiced=False, voiceVibrato=True) p_melody = predominant_melody(audio) w = Windowing(type = 'hann') pkRange, minPos, maxPos = _calcPeakRange(SR, N, minFreq, 10000.) spectrum = Spectrum() peaks = PeakDetection(minPosition=minPos, maxPosition=maxPos, range=pkRange, maxPeaks=100, threshold=-100.) #harmonic_peaks = HarmonicPeaks(maxHarmonics=20) # this essentia class is broken, segfaults due to array error for i, frame in enumerate(FrameGenerator(audio, frameSize = N, hopSize = H)): spec = spectrum(w(frame)) pk = peaks(20*log10(spec+finfo(float).eps)) hpk = get_harmonic_peaks(pk[0] * (SR / N), 10**(pk[1]/20.), p_melody[0][i]) # python harmonic peaks, below pool.add('freqs', hpk[0]) pool.add('mags', hpk[1]) pool.add('melody', p_melody[0]) pool.add('melody_confidence', p_melody[1]) pool.add('fname', filename.split('.')[0].replace('DKW_','')) return pool def get_harmonic_peaks(freqs, mags, f0, maxHarmonics=20, tolerance=.2): """ Select harmonic peaks of given f0 from given spectral peak freqs and mags This is a Python port of the essentia harmonic peaks algorithm inputs: freqs - list of peak frequencies mags - list of peak magnitudes f0 - estimated predominant fundamental frequency maxHarmonics - how many harmonics to return [20] tolerance - proportion of frequency deviation to allow (0,0.5) [0.2] outputs: harmonic_freqs, harmonic_mags - vectors of harmonic peak frequencies and magnitudes """ f0 = float(f0) # just to make sure if tolerance<0.0 or tolerance>0.5: raise ValueError("HarmonicPeaks: tolerance must be in range (0,0.5)") if maxHarmonics<0 or maxHarmonics>100: raise ValueError("HarmonicPeaks: maxHarmonics must be in range [0,100]") if f0<0: raise ValueError("HarmonicPeaks: input pitch must be greater than zero") if len(freqs) != len(mags): raise ValueError("HarmonicPeaks: frequency and magnitude input vectors must have the same size") if f0 == 0: # pitch is unknown -> no harmonic peaks found return array([], dtype='f4'),array([], 
dtype='f4') if len(freqs)==0: # no peaks -> no harmonic peaks either return array([], dtype='f4'),array([], dtype='f4') if freqs[0] <= 0: raise ValueError("HarmonicPeaks: spectral peak frequencies must be greater than 0Hz") for i in xrange(1, len(freqs)): if freqs[i] < freqs[i-1]: raise ValueError("HarmonicPeaks: spectral peaks input must be ordered by frequency") if freqs[i] == freqs[i-1]: raise ValueError("HarmonicPeaks: duplicate spectral peak found, peaks cannot be duplicated") if freqs[i] <= 0: raise ValueError("HarmonicPeaks: spectral peak frequencies must be greater than 0Hz") ratioTolerance = tolerance candidates = [(-1,0)] * maxHarmonics # immutable for safety (assign new for replacement) ratioMax = maxHarmonics + ratioTolerance for i in xrange(len(freqs)): ratio = freqs[i] / f0 harmonicNumber = int(round(ratio)) distance = abs(ratio - harmonicNumber) if harmonicNumber < maxHarmonics: # Added by MKC 5/2/15, this is the ESSENTIA BUG if distance <= ratioTolerance and ratio <= ratioMax: if candidates[harmonicNumber-1][0] == -1 or distance < candidates[harmonicNumber-1][1]: # first occured candidate or a better candidate for harmonic candidates[harmonicNumber-1] = (i,distance) elif distance == candidates[harmonicNumber-1][1]: # select the one with max amplitude if mags[i] > mags[candidates[harmonicNumber-1][0]]: candidates[harmonicNumber-1] = (i, distance) harmonicFrequencies = [] harmonicMagnitudes = [] for h in range(maxHarmonics): i = candidates[h][0] if i < 0: # harmonic not found, output ideal harmonic with 0 magnitude harmonicFrequencies.append((h+1) * f0) harmonicMagnitudes.append(0.) 
else: harmonicFrequencies.append(freqs[i]) harmonicMagnitudes.append(mags[i]) return array(harmonicFrequencies, dtype='f4'), array(harmonicMagnitudes, dtype='f4') def _msg(s, newln=True): """ print messages instantly (flush stdout buffer) """ print s, if newln: print sys.stdout.flush() def peaks_cqft(fname, freqs, mags, sonify=False, conv=False, N=4096, H=2048, SR=44100.): """ Peaks to constant-Q transform, or audio inputs: fname - name of audio file freqs - peak freqs per time-point mags - peak magnitudes sonify- return audio instead of cqft [False] N - fft length H - fft hop SR - audio sample rate outputs: bregman.LogFrequencySpectrum """ F = features.LogFrequencySpectrum(fname, nfft=N, wfft=N, nhop=H) Xhat = zeros(F.X.shape) T = Xhat.shape[1] for t in xrange(len(freqs)): if t<T: for f,a in zip(freqs[t],mags[t]): Xhat[argmin(abs(F._logfrqs-f)),t]=a if conv: for t in xrange(len(freqs)): if t<T: Xhat[:,t] = convolve(Xhat[:,t], testsignal.gauss_pdf(10,5,1),'same') if sonify: Xhat = F.inverse(Xhat) # use original phases return Xhat.T def peaks_sinusoids(freqs, mags, N=4096, H=2048, SR=44100.): """ Sinusoidal resynthesis of spectral peaks inputs: freqs - peak freqs per time-point mags - peak magnitudes N - fft length H - fft hop SR - audio sample rate outputs: audio """ SR = float(SR) phases = rand(100)*2*pi-pi # initial phase z = zeros(len(freqs)*H+N-H, dtype='f') for t, (freqs,mags) in enumerate(zip(freqs,mags)): x = zeros(N) for k in xrange(len(freqs)): x += mags[k]*testsignal.sinusoid(f0=freqs[k], sr=SR, num_points=N, phase_offset=phases[k]) phases[k] = (phases[k] + pi + N/SR * 2 * pi * freqs[k]) % (2*pi) - pi z[t*H:t*H+N] += hamming(N)*x return z def _calcPeakRange(sr,n, loFreq=200., hiFreq=2000.): """ _calcPeakRange - utility function to calculate parameters for peaks inputs: sr - sample rate n - fft length loFreq - lowest peak frequency hiFreq - highest peak frequency """ fftBase = sr / float(n) pkRange = n/2 + 1 minPos = int(round(loFreq/fftBase)) maxPos = 
int(round(hiFreq/fftBase)) return pkRange, minPos, maxPos def _dct_coefs(N): """ Create discrete cosine transform coefficients for N x N matrix """ d = array([cos(pi/N*(arange(N)+0.5)*k) for k in arange(N)],dtype='f4') d[0] *= 1/sqrt(2) d *= sqrt(2.0/N) return d def _imagesc(X): """ Emulate Matlab's imagesc() function """ imshow(X, aspect = 'auto', origin='bottom', interpolation='nearest') colorbar() def vowel_analysis(audio=[], filename='DKW_EttaJames_1961.wav', nBands=48, nCoefs=10, N=4096, H=2048): """ Extract vowel analysis as frame#, F1, F2, F3 inputs: audio - the audio data [empty=use filename] filename - audio filename ['DKW_EttaJames_1961.wav'] nBands - spectrum bands per octave [48] nCoefs - cepstrum number of coefficients to retain [10] outputs: pool - an essentia pool structure containing: mel_bands - Mel frequency bands mel_coefs - Mel cepstral coefficients (direct DCT) mfccs - Mel cepstral coefficients (essentia) lqft - inverse of direct DCT mel_coefs peaks - peaks, sorted by frequency, of lqft """ if not len(audio): loader = essentia.standard.MonoLoader(filename=filename) audio = loader() pool = essentia.Pool() w = Windowing(type = 'hann') spectrum = Spectrum() mfcc = MFCC(inputSize=N/2+1) melbands = MelBands(inputSize=N/2+1, numberBands=nBands) D = _dct_coefs(nBands)[:nCoefs].T peaks = PeakDetection(range=nBands, minPosition=0, maxPosition=nBands-1, maxPeaks=10) for frame in FrameGenerator(audio, frameSize = N, hopSize = H): spec = spectrum(w(frame)) mel_bands = 20*log10(melbands(spec+finfo(float).eps)) mel_coefs = dot(D.T,mel_bands) pool.add('mel_bands', mel_bands) pool.add('mel_coefs', mel_coefs) pool.add('mfccs', mfcc(spec)[1]) lqft = dot(D, mel_coefs) pool.add('peaks',peaks(lqft)[0]) # lqft = 10**(lqft/20.) 
# lqft = lqft / lqft.max() pool.add('lqft', lqft) if filename is not None: pool.add('fname', filename.split('.')[0].replace('DKW_','')) return pool def _plot_vowels(pool, saveplot=False): mel_bands = essentia.array(pool['mel_bands']).T mel_coefs = essentia.array(pool['mel_coefs']).T mfccs = essentia.array(pool['mfccs']).T lqft = essentia.array(pool['lqft']).T peaks = pool['peaks'] if(plotting): figure() # Mel Bands subplot(221) _imagesc(mel_bands) title('Mel Freq. Bands',fontsize=14) # LQFT subplot(222) _imagesc(lqft) title('Low Quefrency, Peaks', fontsize=14) # Show Peaks [plot(t,qq,'bo') for t,q in enumerate(peaks) for qq in q] axis('tight') # MFCCs subplot(223) _imagesc(mfccs[1:,:]) title('13 MFCC Coefs', fontsize=14) # DCT Projected MFCCs subplot(224) _imagesc(mel_coefs[1:,:]) nCoefs = mel_coefs.shape[0] title('%d MFCC Coefs'%nCoefs, fontsize=14) suptitle(pool['fname'][0],fontsize=16) if saveplot: savefig('%s.png'%pool['fname'][0]) def _save_data(pool, key='lqft', delim=','): if savedata: with open('%s.%s'%(pool['fname'][0],key), 'wt') as f: for p in pool[key]: for i, a in enumerate(p): if i != 0: fwrite(delim) f.write(str(a)) f.write('\n') def align_vowels(X,Y, dfun=distance.euc_normed): """ DTW align vowels based on spectral data in X and Y Uses dpcore dynamic programming library inpus: X, Y - spectral data in rows (num columns must be equal) outputs: p, q, c - Dynamic Time Warp coefs: X[p]<-->Y[q], c=cost """ Df = dfun(X,Y) p,q,C,phi = dpcore.dp(Df, penalty=0.1, gutter=0.1) return {'p':p,'q':q,'C':C} def analyze_all(V, dfun=distance.euc_normed, dfw=True, **kwargs): """ Perform DTW(+DFW) timbre analysis on list of audio files, return pair-wise costs inputs: V - vowel analysis pool dfun - distance function to use [bregman.distance.euc_normed] dfw - whether to perform dynamic FREQUENCY warping (dfw) after DTW [True] **kwargs - arguments to dpcore.dp DTW algorithm outputs: Z - pair-wise cost matrix (timbre time/frequency warp) between audio files """ Z = 
zeros((len(V),len(V))) kwargs['penalty'] = kwargs.pop('penalty',0.1) kwargs['gutter'] = kwargs.pop('gutter',0.0) for i,a in enumerate(V): for j,b in enumerate(V): D = dfun(a['mfccs'],b['mfccs']) p,q,c,phi = dpcore.dp(D, **kwargs) if not dfw: Z[i,j] = diag(dfun(a['lqft'][p],b['lqft'][q])).mean() else: alpha = optimal_vtln(b['lqft'][q], a['lqft'][p], 'symmetric') print "Optimal DFW (VTLN) warp alpha=", alpha Vi_warped = vtln(b['lqft'][q], 'symmetric', alpha) Z[i,j] = diag(dfun(Vi_warped,b['lqft'][q])).mean() return Z def tsne_all(V, ref_idx=9, dfun=distance.euc_normed, dfw=True, tsne=True, perplexity=3, plotting=False, **kwargs): """ Perform tsne / DTW / DFW timbre analysis on list of audio files, return pair-wise costs Uses tsne library inputs: V - vowel analysis pool ref_idx - reference audio file, all audio warped to this [9] dfun - distance function to use [bregman.distance.euc_normed] dfw - whether to perform dynamic FREQUENCY warping (dfw) after DTW [True] tsne - whether to return t-SNE or just DTW spectrum [True] perplexity - how many near neighbours in t-SNE [3] plotting - whether to plot the t-SNE result [False] **kwargs - arguments to dpcore.dp DTW algorithm outputs: Z - 2D projection map of audio files """ Z = [] kwargs['penalty'] = kwargs.pop('penalty',0.1) kwargs['gutter'] = kwargs.pop('gutter',0.0) for i in xrange(len(V)): C = dfun(V[ref_idx]['mfccs'],V[i]['mfccs']) p,q,c,phi = dpcore.dp(C, **kwargs) if not dfw: Z.append(V[i]['lqft'][q]) else: alpha = optimal_vtln(V[i]['lqft'][q], V[ref_idx]['lqft'][p], 'symmetric') print "Optimal DFW (VTLN) warp alpha=", alpha Vi_warped = vtln(V[i]['lqft'][q], 'symmetric', alpha) Z.append(Vi_warped) if tsne: X = array([zz.flatten() for zz in Z], dtype='f8') Z = _tsne(X, perplexity=perplexity, plotting=plotting) return Z def optimal_vtln(Y,X, warpFunction='asymmetric'): """ Return optimal frequency warp between spectrum data Y and target spectrum X. 
inputs: Y - spectrogram data to warp X - target spectrogram warpFunction - which warp method to use ['asymmetric'] outputs: alpha - optimal warp factor """ min_mse = inf for alpha in arange(0.1,1.8,.1): Xhat = vtln(Y, warpFunction, alpha) mse = ((X - Xhat)**2).mean() if mse < min_mse: min_mse = mse min_alpha = alpha print "alpha=%.3f, min_mse=%.3f"%(min_alpha,min_mse) return min_alpha def vtln(frames, warpFunction='asymmetric', alpha=1.0): """ Vocal tract length normalization via frequency warping Python port of David Sundermann's matlab implementation by M. Casey inputs: frames - the frequency data to warp warpFuction - asymmetric, symmetric, power, quadratic, bilinear [asymmetric] alpha - the warp factor """ warp_funs = ['asymmetric', 'symmetric', 'power', 'quadratic', 'bilinear'] if not warpFunction in warp_funs: print "Invalid warp function" return warpedFreqs = zeros(frames.shape) for j in xrange(len(frames)): m = len(frames[j]) omega = (arange(m)+1.0) / m * pi omega_warped = omega if warpFunction is 'asymmetric' or warpFunction is 'symmetric': omega0 = 7.0/8.0 * pi if warpFunction is 'symmetric' and alpha > 1: omega0 = 7.0/(8.0 * alpha) * pi omega_warped[where(omega <= omega0)] = alpha * omega[where(omega <= omega0)] omega_warped[where(omega > omega0)] = alpha * omega0 + ((pi - alpha * omega0)/(pi - omega0)) * (omega[where(omega > omega0)] - omega0) omega_warped[where(omega_warped >= pi)] = pi - 0.00001 + 0.00001 * (omega_warped[where(omega_warped >= pi)]) elif warpFunction is 'power': omega_warped = pi * (omega / pi) ** alpha elif warpFunction is 'quadratic': omega_warped = omega + alpha * (omega / pi - (omega / pi)**2) elif warpFunction is 'bilinear': z = exp(omega * 1j) omega_warped = abs(-1j * log((z - alpha)/(1 - alpha*z))) omega_warped = omega_warped / pi * m warpedFrame = interp(omega_warped, arange(m)+1, frames[j]).T if isreal(frames[j][-1]): warpedFrame[-1] = real(warpedFrame[-1]) warpedFrame[isnan(warpedFrame)] = 0 warpedFreqs[j]=warpedFrame return 
warpedFreqs def tsne_ground_truth(file="StormyWeather_DataSet.csv", dtype='f8', tsne=True, **kwargs): """ Load vowel formant frequency ground truth labels and create array of freqs """ vowels = [] with open(file,"r") as f: reader = csv.reader(f) C = [row for row in reader] vowels.append(array([c[2:5] for c in C[3::6]],dtype=dtype)) vowels.append(array([c[2:5] for c in C[4::6]],dtype=dtype)) vowels.append(array([c[2:5] for c in C[5::6]],dtype=dtype)) vowels.append(array([c[2:5] for c in C[6::6]],dtype=dtype)) vowels = array(zip(*vowels)).reshape(15,-1) if not tsne: return vowels else: Z = _tsne(vowels, **kwargs) return Z def _tsne(X, dir_str="*.wav", perplexity=3, plotting=False): """ Utility function to compute tsne """ flist = sorted(glob.glob(dir_str)) Z = bh_sne(X, perplexity=perplexity) if plotting: figure() plot(Z[:,0], Z[:,1],'r.') [[text(p[0],p[1],'%s'%flist[i],fontsize=12) for i,p in enumerate(Z)]] return Z def harmonic_tsne_all(dir_expr="*.wav", return_analyses=False, **kwargs): """ time and frequency warped t-SNE of predominant harmonic spectrum inputs: dir_expr - directory expression for audio files ["*.wav"] return_analyses - return intermediate analyses instead of t-SNE or feature vector space **kwargs - key-word arguments for tsne analysis function {harmonic_}tsne_all() ref_idx - reference audio file, all audio warped to this [9] dfun - distance function to use [bregman.distance.euc_normed] dfw - whether to perform dynamic FREQUENCY warping (dfw) after DTW [True] tsne - whether to return t-SNE or just DTW spectrum [True] perplexity - how many near neighbours in t-SNE [3] plotting - whether to plot the t-SNE result [True] **kwargs - arguments to dpcore.dp DTW algorithm outputs (depends on value of return_analyses): Z2 - 2d embedding of dtw-dfw aligned predominant harmonics vowel analysis [return_analyses=False] H, X, V - harmonic peaks, reconstructed signals, harmonic vowel analysis spectra [return_analyses=True] """ normalize = 
testsignal.balance_signal flist = sorted(glob.glob(dir_expr)) conv = kwargs.pop("conv",False) H = [predominant_harmonics(filename=f, N=4096, H=1024) for f in flist] X = [normalize(peaks_cqft(f, H[i]['freqs'], H[i]['mags'], conv=conv, sonify=True, N=4096,H=1024)) for i,f in enumerate(flist)] V = [vowel_analysis(array(x,dtype='f4'),f) for x,f in zip(X,flist)] Z2 = tsne_all(V,ref_idx=9,**kwargs) if return_analyses: return H, X, V return Z2 def test_vowel_analysis(dir_expr="*.wav",harmonic=True,null_model=False,**kwargs): """ pearson r test bewteen vowel analysis and ground truth markup inputs: harmonic - whether to use predominant harmonic spectrum [True] null_model - whether to permute ground truth indices for null model testing **kwargs - key-word arguments for tsne analysis function {harmonic_}tsne_all() ref_idx - reference audio file, all audio warped to this [9] dfun - distance function to use [bregman.distance.euc_normed] dfw - whether to perform dynamic FREQUENCY warping (dfw) after DTW [True] tsne - whether to return t-SNE or just DTW spectrum [True] perplexity - how many near neighbours in t-SNE [3] plotting - whether to plot the t-SNE result [True] **kwargs - arguments to dpcore.dp DTW algorithm outputs: (r,p) - pearson r coefficient and p-value Z - ground truth embedding [tsne=True] / features [tsne=False] Z2 - vowel analysis embedding [tsne=True] / features [tsne=False] """ Z = tsne_ground_truth(**kwargs) if null_model: Z = permutation(Z) if harmonic: Z2 = harmonic_tsne_all(dir_expr=dir_expr, **kwargs) else: flist = sorted(glob.glob(dir_expr)) V = [vowel_analysis(filename=f, N=4096, H=1024) for f in flist] Z2 = tsne_all(V, ref_idx=9,**kwargs) Z2 = array([z.flatten() for z in Z2]) D0, D1 = distance.euc(Z,Z), distance.euc(Z2,Z2) # Remove zeros on main diagonal d0, d1 = D0[where(1-eye(len(Z)))].flatten(), D1[where(1-eye(len(Z2)))].flatten() p = ss.pearsonr(d0,d1) return p, Z, Z2 def ismir2015_evaluate(niter=0, **kwargs): """ inputs: niter - number of 
permutation test iterations to run [0=none] **kwargs - key-word arguments for tsne analysis function {harmonic_}tsne_all() ref_idx - reference audio file, all audio warped to this [9] dfun - distance function to use [bregman.distance.euc_normed] dfw - whether to perform dynamic FREQUENCY warping (dfw) after DTW [True] tsne - whether to return t-SNE or just DTW spectrum [True] perplexity - how many near neighbours in t-SNE [3] plotting - whether to plot the t-SNE result [True] **kwargs - arguments to dpcore.dp DTW algorithm outputs: res_n, res_h - result dicts for non-harmonic or harmonic vowel analyses {'p','Z','Z2'} """ res_n = {} # non harmonic analysis (full spectrum) res_n['p'],res_n['Z'],res_n['Z2'] = test_vowel_analysis(harmonic=False,**kwargs) res_n['null'] = [test_vowel_analysis(harmonic=False,null_model=True,**kwargs) for _ in range(niter)] res_h = {} # predominant harmonics analysis (extracted voice) res_h['p'],res_h['Z'],res_h['Z2'] = test_vowel_analysis(harmonic=True,**kwargs) res_h['null'] = [test_vowel_analysis(harmonic=True,null_model=True,**kwargs) for _ in range(niter)] return res_n, res_h if __name__ == "__main__": # ISMIR 2015 test """ Dynamic frequency warping VTLN Test with ground truth markup (perasonr plus permution null model) Background sound reduction [PreFest] Results per vowel (via alignment to reference mark-up) Plot tsne per vowel Currently, tsne on predominant harmonics is seemingly random. Flip-flopping between correlated and not with ground truth. Suggest inspecting vowel spectrum representation. Then experimenting with dimensionality reduction in tsne (pca_d), although this crashes at the moment. """ p_n, Z_n, Z2_n = test_vowel_analysis(harmonic=False,tsne=True,dfw=True,perplexity=3) p_h, Z_h, Z2_h = test_vowel_analysis(harmonic=True,tsne=True,dfw=True,perplexity=3)
# Unit tests for g1.operations.databases.servers.servers.DatabaseServer:
# revision bookkeeping, transactions, timer-driven transaction/lease expiry,
# and event publication through the injected publisher mock.
import unittest
import unittest.mock
import functools

from g1.asyncs import kernels
from g1.asyncs.bases import tasks
from g1.databases import sqlite
from g1.operations.databases.bases import interfaces
from g1.operations.databases.servers import connections
from g1.operations.databases.servers import servers


def synchronous(test_method):
    # Decorator: run an async test method to completion inside a kernel,
    # with the server context held for the duration of the coroutine.
    @kernels.with_kernel
    @functools.wraps(test_method)
    def wrapper(self):
        with self.server:
            kernels.run(test_method(self))
        self.server.shutdown()
    return wrapper


def with_kernel(test_method):
    # Decorator: like ``synchronous`` but for plain (sync) test methods that
    # drive the kernel themselves via kernels.run(...).
    @kernels.with_kernel
    @functools.wraps(test_method)
    def wrapper(self):
        with self.server:
            test_method(self)
        self.server.shutdown()
    return wrapper


def de(p, c):
    # Shorthand: a DatabaseEvent from previous/current key-values.
    return interfaces.DatabaseEvent(previous=p, current=c)


def kv(r, k, v):
    # Shorthand: a KeyValue at revision r.
    return interfaces.KeyValue(revision=r, key=k, value=v)


class ServersTest(unittest.TestCase):

    def setUp(self):
        super().setUp()
        # Shrink reader/writer waits and transaction timeout so the
        # timer-expiry tests finish quickly.
        unittest.mock.patch.multiple(
            connections,
            _WAIT_FOR_READER=0.01,
            _WAIT_FOR_WRITER=0.01,
        ).start()
        unittest.mock.patch.multiple(
            servers,
            _TRANSACTION_TIMEOUT=0.01,
        ).start()
        # Mock the time module used by servers so tests control both the
        # monotonic clock (transaction expiry) and wall clock (lease expiry).
        mock_time = unittest.mock.patch(servers.__name__ + '.time').start()
        self.mock_monotonic = mock_time.monotonic
        self.mock_monotonic.return_value = 0
        self.mock_time = mock_time.time
        self.mock_time.return_value = 0
        self.engine = sqlite.create_engine('sqlite://')
        self.publisher = unittest.mock.Mock()
        self.server = servers.DatabaseServer(self.engine, self.publisher)

    def tearDown(self):
        unittest.mock.patch.stopall()
        super().tearDown()

    def assert_publish(self, events):
        # Check the publisher saw these events in order (assert_has_calls
        # does not forbid extra calls); empty means no calls at all.
        if not events:
            self.publisher.publish_nonblocking.assert_not_called()
        else:
            self.publisher.publish_nonblocking.assert_has_calls([
                unittest.mock.call(event) for event in events
            ])

    @synchronous
    async def test_no_transaction(self):
        # Outside a transaction every mutation commits (and publishes)
        # immediately, bumping the revision each time.
        self.assertEqual(await self.server.get_revision(), 0)
        self.assertIsNone(await self.server.get(key=b'k1'))
        self.assert_publish([])

        self.assertIsNone(await self.server.set(key=b'k1', value=b'v1'))
        self.assertEqual(await self.server.get_revision(), 1)
        self.assertEqual(
            await self.server.get(key=b'k1'),
            interfaces.KeyValue(revision=1, key=b'k1', value=b'v1'),
        )
        self.assert_publish([de(None, kv(1, b'k1', b'v1'))])

        # set returns the previous key-value when one exists.
        self.assertEqual(
            await self.server.set(key=b'k1', value=b'v2'),
            interfaces.KeyValue(revision=1, key=b'k1', value=b'v1'),
        )
        self.assertEqual(await self.server.get_revision(), 2)
        self.assertEqual(
            await self.server.get(key=b'k1'),
            interfaces.KeyValue(revision=2, key=b'k1', value=b'v2'),
        )
        self.assert_publish([
            de(None, kv(1, b'k1', b'v1')),
            de(kv(1, b'k1', b'v1'), kv(2, b'k1', b'v2')),
        ])

        # delete with no arguments removes (and returns) all entries.
        self.assertEqual(
            await self.server.delete(),
            [interfaces.KeyValue(revision=2, key=b'k1', value=b'v2')],
        )
        self.assertEqual(await self.server.get_revision(), 3)
        self.assertIsNone(await self.server.get(key=b'k1'))
        self.assert_publish([
            de(None, kv(1, b'k1', b'v1')),
            de(kv(1, b'k1', b'v1'), kv(2, b'k1', b'v2')),
            de(kv(2, b'k1', b'v2'), None),
        ])

    @synchronous
    async def test_transaction(self):
        await self.server.begin(transaction=1)
        self.assertEqual(await self.server.get_revision(transaction=1), 0)
        self.assertEqual(self.server._tx_revision, 0)
        self.assertIsNone(await self.server.get(key=b'k1', transaction=1))
        self.assertIsNone(
            await self.server.set(key=b'k1', value=b'v1', transaction=1)
        )
        # In a transaction, revision is incremented at the end.
        self.assertEqual(await self.server.get_revision(transaction=1), 0)
        # But reads inside the transaction already see the pending write.
        self.assertEqual(
            await self.server.get(key=b'k1', transaction=1),
            interfaces.KeyValue(revision=1, key=b'k1', value=b'v1'),
        )
        # Events are published only on commit.
        self.assert_publish([])
        await self.server.commit(transaction=1)
        self.assert_publish([de(None, kv(1, b'k1', b'v1'))])
        self.assertEqual(await self.server.get_revision(), 1)
        self.assertEqual(
            await self.server.get(key=b'k1'),
            interfaces.KeyValue(revision=1, key=b'k1', value=b'v1'),
        )

        self.publisher.publish_nonblocking.reset_mock()
        await self.server.begin(transaction=2)
        self.assertEqual(await self.server.get_revision(transaction=2), 1)
        self.assertEqual(self.server._tx_revision, 1)
        self.assertEqual(
            await self.server.set(key=b'k1', value=b'v2', transaction=2),
            interfaces.KeyValue(revision=1, key=b'k1', value=b'v1'),
        )
        # In a transaction, revision is incremented at the end.
        self.assertEqual(await self.server.get_revision(transaction=2), 1)
        self.assertEqual(
            await self.server.get(key=b'k1', transaction=2),
            interfaces.KeyValue(revision=2, key=b'k1', value=b'v2'),
        )
        # Rollback discards the pending write and publishes nothing.
        await self.server.rollback(transaction=2)
        self.publisher.publish_nonblocking.assert_not_called()
        self.assertEqual(await self.server.get_revision(), 1)
        self.assertEqual(
            await self.server.get(key=b'k1'),
            interfaces.KeyValue(revision=1, key=b'k1', value=b'v1'),
        )

    @with_kernel
    def test_transaction_expired(self):
        # Advancing the (mocked) monotonic clock past _TRANSACTION_TIMEOUT
        # makes the timer task abort the open transaction.
        self.assertEqual(tuple(self.server._manager._timeout_tx_ids), ())
        kernels.run(self.server.begin(transaction=1))
        self.assertEqual(self.server._manager.tx_id, 1)
        self.mock_monotonic.return_value = 10
        server_task = tasks.spawn(self.server._run_timer_tasks)
        # The timer loop keeps running, so the kernel run must time out.
        with self.assertRaises(kernels.KernelTimeout):
            kernels.run(timeout=0.02)
        self.assertEqual(self.server._manager.tx_id, 0)
        self.assertEqual(tuple(self.server._manager._timeout_tx_ids), (1, ))
        self.server.shutdown()
        kernels.run(timeout=0.01)
        self.assertIsNone(server_task.get_result_nonblocking())

    @with_kernel
    def test_lease_expired(self):
        # Advancing the (mocked) wall clock past the lease expiration makes
        # the timer task delete the associated key and the lease itself.
        kernels.run(self.server.set(key=b'k1', value=b'v1'))
        kernels.run(self.server.lease_grant(lease=1, expiration=0.01))
        kernels.run(self.server.lease_associate(lease=1, key=b'k1'))
        self.assertEqual(kernels.run(self.server.get_revision()), 1)
        self.assertEqual(
            kernels.run(self.server.get(key=b'k1')),
            interfaces.KeyValue(revision=1, key=b'k1', value=b'v1'),
        )
        self.assertEqual(
            kernels.run(self.server.lease_get(lease=1)),
            interfaces.Lease(lease=1, expiration=0.01, keys=(b'k1', )),
        )
        server_task = tasks.spawn(self.server._run_timer_tasks)
        self.mock_time.return_value = 10
        with self.assertRaises(kernels.KernelTimeout):
            kernels.run(timeout=0.02)
        self.assertEqual(kernels.run(self.server.get_revision()), 2)
        self.assertIsNone(kernels.run(self.server.get(key=b'k1')))
        self.assertIsNone(kernels.run(self.server.lease_get(lease=1)))
        self.assert_publish([de(None, kv(1, b'k1', b'v1'))])
        self.server.shutdown()
        kernels.run(timeout=0.01)
        self.assertIsNone(server_task.get_result_nonblocking())

    @synchronous
    async def test_set_not_publish(self):
        # Re-setting a key to its current value publishes no event.
        self.assertIsNone(await self.server.set(key=b'k1', value=b'v1'))
        self.publisher.publish_nonblocking.assert_called_once_with(
            interfaces.DatabaseEvent(
                previous=None,
                current=kv(1, b'k1', b'v1'),
            ),
        )
        self.publisher.publish_nonblocking.reset_mock()
        self.assertEqual(
            await self.server.set(key=b'k1', value=b'v1'),
            kv(1, b'k1', b'v1'),
        )
        self.publisher.publish_nonblocking.assert_not_called()


if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python2 # # Copyright 2015-2016 Carnegie Mellon University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys fileDir = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(fileDir, "..", "..")) import copy import txaio txaio.use_twisted() from autobahn.twisted.websocket import WebSocketServerProtocol, \ WebSocketServerFactory from twisted.internet import task, defer from twisted.internet.ssl import DefaultOpenSSLContextFactory from twisted.python import log import argparse import cv2 import imagehash import json from PIL import Image import numpy as np import os import StringIO import urllib import base64 from sklearn.decomposition import PCA from sklearn.grid_search import GridSearchCV from sklearn.manifold import TSNE from sklearn.svm import SVC from sklearn.cluster import KMeans import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt import matplotlib.cm as cm import openface import pickle, pprint import json from numpy import genfromtxt import requests import pandas as pd modelDir = os.path.join(fileDir, '..', '..', 'models') dlibModelDir = os.path.join(modelDir, 'dlib') openfaceModelDir = os.path.join(modelDir, 'openface') trainDir=os.path.join(fileDir,'..','..','Train_Image') classifierDir=os.path.join(fileDir,'..','..') imagesDir=os.path.join(fileDir,'..','..') peopleDir=os.path.join(fileDir,'..','..') featureDir=os.path.join(fileDir,'..','..','Feature_gui') 
def loadDist(path=None):
    """Load the (LabelEncoder, classifier) pair for the known-people model.

    Args:
        path: Optional pickle location; when omitted it defaults to
            ``featureDir + '/classifier.pkl'``.  The default is resolved
            lazily so the module global is only needed when no path is given.

    Returns:
        ``(le, clf)`` on success, ``(None, None)`` when the file is missing
        or unreadable.  BUG FIX: the old code returned a single ``None``,
        which made the caller's tuple unpacking
        ``(self.le_dist, self.clf_dist) = loadDist()`` raise TypeError and
        mask the real error.
    """
    if path is None:
        path = featureDir + '/classifier.pkl'
    try:
        with open(path, 'rb') as f:
            if sys.version_info[0] < 3:
                # Python 2 pickle.load has no ``encoding`` keyword.
                return pickle.load(f)
            else:
                # latin1 lets Python 3 read pickles written under Python 2.
                return pickle.load(f, encoding='latin1')
    except Exception:
        return (None, None)
class Face:
    """A detected face: its embedding vector plus an identity label."""

    def __init__(self, rep, identity):
        # rep: embedding produced by the Torch network; identity: label index
        # (or -1 for unknown) as used by the training UI.
        self.rep = rep
        self.identity = identity

    def __repr__(self):
        # Show only the first five embedding components to keep logs short.
        return "{id: %s, rep[0:5]: %s}" % (str(self.identity), self.rep[0:5])
    def onConnect(self, request):
        """Autobahn callback: a client has begun the WebSocket handshake.

        Every new connection starts in training mode; the client turns it
        off later via a TRAINING message to switch to live recognition.
        """
        # request.peer is a "tcp4:<ip>:<port>" string — logged for debugging.
        print("Client connecting: {0}".format(request.peer))
        self.training = True
h in self.images: del self.images[h] if not self.training: self.trainSVM() else: print("Image not found.") elif msg['type'] == 'REQ_TSNE': print "Received message TSNE" self.sendTSNE(msg['people']) elif msg['type']== 'RE-TRAIN': print self.classifier self.retrain() elif msg['type']=='DISTANCE': #(self.le,self.clf)=loadDist() self.classifier="Distance" self.centroids=pd.read_csv(featureDir+'/centroids_csv.csv') self.centroids.drop(self.centroids.columns[0],axis=1,inplace=True) print self.classifier elif msg['type']=="UNKNOWN": #(self.le,self.clf)=loadUnknown() self.classifier="Unknown" self.centroids=pd.read_csv(Feature_unknown+'/centroids_csv.csv') self.centroids.drop(self.centroids.columns[0],axis=1,inplace=True) print self.classifier else: print("Warning: Unknown message type: {}".format(msg['type'])) def onClose(self, wasClean, code, reason): print("WebSocket connection closed: {0}".format(reason)) def loadState(self, jsImages, training, jsPeople): self.training = training for jsImage in jsImages: h = jsImage['hash'].encode('ascii', 'ignore') self.images[h] = Face(np.array(jsImage['representation']), jsImage['identity']) for jsPerson in jsPeople: self.people.append(jsPerson.encode('ascii', 'ignore')) if not training: self.trainSVM() def getData(self): X = [] y = [] for img in self.images.values(): X.append(img.rep) y.append(img.identity) numIdentities = len(set(y + [-1])) - 1 if numIdentities == 0: return None if args.unknown: numUnknown = y.count(-1) numIdentified = len(y) - numUnknown numUnknownAdd = (numIdentified / numIdentities) - numUnknown if numUnknownAdd > 0: print("+ Augmenting with {} unknown images.".format(numUnknownAdd)) for rep in self.unknownImgs[:numUnknownAdd]: # print(rep) X.append(rep) y.append(-1) X = np.vstack(X) y = np.array(y) return (X, y) def sendTSNE(self, people): #if self.classifier=='Distance': # d=self.embeddings_dist #else: # d=self.embeddings_unknown d = pd.read_csv(featureDir+'/embeddings.csv') if d is None: return else: print 
d.columns (X, y) = (copy.copy(d),copy.copy(d['labels'])) X.drop('labels',axis=1, inplace=True) X_pca = PCA(n_components=50).fit_transform(X, X) tsne = TSNE(n_components=2, init='random', random_state=0) X_r = tsne.fit_transform(X_pca) yVals = list(np.unique(y)) colors = cm.rainbow(np.linspace(0, 1, len(yVals))) # print(yVals) plt.figure() for c, i in zip(colors, yVals): #name = "Unknown" if i == -1 else people[i] name=i plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=name) plt.legend() imgdata = StringIO.StringIO() plt.savefig(imgdata, format='png') imgdata.seek(0) content = 'data:image/png;base64,' + \ urllib.quote(base64.b64encode(imgdata.buf)) msg = { "type": "TSNE_DATA", "content": content } self.sendMessage(json.dumps(msg)) def retrain(self): os.chdir(alignDir) if self.classifier=="Distance": os.system('python align-dlib.py'+' '+trainDir+' '+'align outerEyesAndNose Aligned_data/ --size 96') if os.path.isfile(alignDir+'/Aligned_data'+'/cache.t7'): os.remove(alignDir+'/Aligned_data'+'/cache.t7') os.system('./batch-represent/main.lua -outDir Feature_gui/ -data Aligned_data/') os.system('python classifier.py train Feature_gui/ --classifier RadialSvm') os.system('cp -r Feature_gui/classifier.pkl Feature_dir/') os.system('cp -r Feature_gui/centroids_csv.csv Feature_dir/') else: os.system('cp -r Unknown/ Train_Image/') os.system('python align-dlib.py Train_Image/ align outerEyesAndNose Aligned_data_unknown/ --size 96') if os.path.isfile(alignDir+'/Aligned_data_unknown'+'/cache.t7'): os.remove(alignDir+'/Aligned_data_unknown'+'/cache.t7') os.system('./batch-represent/main.lua -outDir Feature_unknown/ -data Aligned_data_unknown/') os.system('python classifier.py train Feature_unknown/ --classifier RadialSvm') os.chdir(alignDir+'/Train_Image/') os.system('rm -rf Unknown/') os.chdir(fileDir) print self.classifier #if self.classifier=="Distance": try: with open(featureDir+'/classifier.pkl', 'rb') as f: # if sys.version_info[0] < 3: if sys.version_info[0] < 3: 
(self.le_dist, self.clf_dist) = pickle.load(f) #return (le,clf) else: (self.le_dist, clf_dist) = pickle.load(f, encoding='latin1') #return (le,clf) except Exception as e: return None #if self.classifier=="Unknown": try: with open(Feature_unknown+'/classifier.pkl', 'rb') as f: # if sys.version_info[0] < 3: if sys.version_info[0] < 3: (self.le_unknown, self.clf_unknown) = pickle.load(f) #return (le,clf) else: (self.le_unknown, self.clf_unknown) = pickle.load(f, encoding='latin1') #return (le,clf) except Exception as e: return None msg={"type":"RE-TRAINED"} self.sendMessage(json.dumps(msg)) return def trainSVM(self): print("+ Training SVM on {} labeled images.".format(len(self.images))) d = self.getData() if d is None: self.svm = None return else: (X, y) = d numIdentities = len(set(y + [-1])) if numIdentities <= 1: return param_grid = [ {'C': [1, 10, 100, 1000], 'kernel': ['linear']}, {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']} ] self.svm = GridSearchCV(SVC(C=1), param_grid, cv=5).fit(X, y) print "Persisting Model", self.svm self.persistModel(self.svm) print "Loading Model" def loadModel(self): # model = open('model.pkl', 'r') # svm_persisted = pickle.load('model.pkl') # output.close() # return svm_persisted # return True with open(classifierDir+'/model.pkl', 'rb') as f: # if sys.version_info[0] < 3: mod = pickle.load(f) return mod def persistModel(self, mod): # output = open('model.pkl', 'w') with open(classifierDir+'/model.pkl', 'wb') as f: pickle.dump(mod, f) # svm_persisted = pickle.dump(mod, 'model.pkl', protocol=2) # output.close() return True def processFrame(self, dataURL, identity): head = "data:image/jpeg;base64," assert(dataURL.startswith(head)) imgdata = base64.b64decode(dataURL[len(head):]) imgF = StringIO.StringIO() imgF.write(imgdata) imgF.seek(0) img = Image.open(imgF) buf = np.fliplr(np.asarray(img)) rgbFrame = np.zeros((300, 400, 3), dtype=np.uint8) rgbFrame[:, :, 0] = buf[:, :, 2] rgbFrame[:, :, 1] = buf[:, :, 1] 
rgbFrame[:, :, 2] = buf[:, :, 0] if not self.training: annotatedFrame = np.copy(buf) # cv2.imshow('frame', rgbFrame) # if cv2.waitKey(1) & 0xFF == ord('q'): # return identities = [] if not self.training: bbs = align.getAllFaceBoundingBoxes(rgbFrame) else: bb = align.getLargestFaceBoundingBox(rgbFrame) bbs = [bb] if bb is not None else [] content=[] for bb in bbs: # print(len(bbs)) faces ={} landmarks = align.findLandmarks(rgbFrame, bb) alignedFace = align.align(args.imgDim, rgbFrame, bb, landmarks=landmarks, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE) if alignedFace is None: continue phash = str(imagehash.phash(Image.fromarray(alignedFace))) if phash in self.images: identity = self.images[phash].identity else: rep = net.forward(alignedFace) # print(rep) if self.training: self.images[phash] = Face(rep, identity) # TODO: Transferring as a string is suboptimal. # content = [str(x) for x in cv2.resize(alignedFace, (0,0), # fx=0.5, fy=0.5).flatten()] content = [str(x) for x in alignedFace.flatten()] msg = { "type": "NEW_IMAGE", "hash": phash, "content": content, "identity": identity, "representation": rep.tolist() } self.sendMessage(json.dumps(msg)) #os.remove(alignDir+'/Aligned_data'+'/cache.t7') personDir=os.getcwd() with open (phash+'.jpg', 'wb') as handle: #response = requests.get(image, stream=True) #if not response.ok: # print response #for block in response.iter_content(1024): # if not block: # break # handle.write(block) handle.write(imgdata) os.chdir(fileDir) with open(imagesDir+'/images.pkl', 'w') as f: pickle.dump(self.images, f) os.chdir(personDir) else: ####Prediction of Class using offline model with distance approach#### dist_distance={} for key in self.centroids_dist.keys(): dist_distance[key]=(np.linalg.norm(rep-list(self.centroids_dist[key]))) print self.centroids_dist.keys() prediction_dist=self.clf_dist.predict_proba(rep).ravel() #print prediction maxI = np.argmax(prediction_dist) person_dist = self.le_dist.inverse_transform(maxI) 
confidence = prediction_dist[maxI] distance=dist_distance[person_dist] person_dist_min=min(dist_distance, key=dist_distance.get) print person_dist #print dist_distance if dist_distance[person_dist]>0.70: person_dist="unknown" ################################################################### dist_unknown={} for key in self.centroids_unknown.keys(): dist_unknown[key]=(np.linalg.norm(rep-list(self.centroids_unknown[key]))) print self.centroids_unknown.keys() prediction_unknown=self.clf_unknown.predict_proba(rep).ravel() maxI = np.argmax(prediction_unknown) person_unknown = self.le_unknown.inverse_transform(maxI) confidence = prediction_unknown[maxI] distance=dist_unknown[person_unknown] #person_dist=min(dist, key=dist.get) print dist_unknown #if dist_unknown[person_unknown]>0.8: # person_unknown="unknown" ##################################################################################################################################### if len(self.people) == 0: identity = -1 elif len(self.people) == 1: identity = 0 elif self.svm: identity = self.svm.predict(rep)[0] print "predicted",identity else: print("hhh") identity = -1 if identity not in identities: identities.append(identity) print identities if not self.training: bl = (bb.left(), bb.bottom()) tr = (bb.right(), bb.top()) cv2.rectangle(annotatedFrame, bl, tr, color=(153, 255, 204), thickness=3) for p in openface.AlignDlib.OUTER_EYES_AND_NOSE: cv2.circle(annotatedFrame, center=landmarks[p], radius=3, color=(102, 204, 255), thickness=-1) #if identity == -1: # if len(self.people) == 1: # name = self.people[0] # else: # name = "Unknown" #else: # name = self.people[identity] if self.classifier=="Distance": person_predicted=person_dist else: person_predicted=person_unknown cv2.putText(annotatedFrame, person_predicted, (bb.left(), bb.top() - 10), cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.75, color=(152, 255, 204), thickness=2) faces['identity']=person_predicted faces['left']=bb.left() faces['right']=bb.right() 
def main(reactor):
    """Twisted entry point: serve OpenFaceServerProtocol over TLS forever."""
    log.startLogging(sys.stdout)
    ws_factory = WebSocketServerFactory()
    ws_factory.protocol = OpenFaceServerProtocol
    tls_context = DefaultOpenSSLContextFactory(tls_key, tls_crt)
    reactor.listenSSL(args.port, ws_factory, tls_context)
    # A Deferred that never fires keeps task.react running until interrupted.
    return defer.Deferred()
class ServiceManager(service.Service):
    """Top-level oslo.service: wires up periodic tasks and the exporter."""

    def __init__(self):
        super(ServiceManager, self).__init__()

    def start(self):
        """Start timers for InfluxDB reporting and cluster checks, plus the
        Prometheus exporter thread when enabled in config."""
        LOG.info('start')
        if CONF.influxdb.enable:
            self.influxdb_periodic_tasks = InfluxdbPeriodicTasks()
            self.tg.add_dynamic_timer(self._get_influxdb_periodic_tasks,
                                      initial_delay=0,
                                      periodic_interval_max=120)
        # BUG FIX: the condition was inverted ("if not ...enable_..."),
        # which spawned the Prometheus exporter exactly when it was disabled
        # and skipped it when enabled.
        if CONF.rabbitmq_manager.enable_prometheus_exporter:
            self.prometheus_exporter_thread = self._spawn_prometheus_exporter()
        else:
            self.prometheus_exporter_thread = None
        self.periodic_tasks = ServicePeriodicTasks()
        self.tg.add_dynamic_timer(self._get_periodic_tasks,
                                  initial_delay=0,
                                  periodic_interval_max=120)

    def wait(self):
        LOG.info('wait')

    def stop(self):
        """Stop the service; wait for the exporter thread if one was started."""
        LOG.info('stop')
        if self.prometheus_exporter_thread is not None:
            self.prometheus_exporter_thread.join()
        super(ServiceManager, self).stop()

    def _get_periodic_tasks(self, raise_on_error=False):
        # Delegate to the oslo periodic-task runner; it returns the delay
        # until the next run, which the dynamic timer consumes.
        ctxt = {}
        return self.periodic_tasks.periodic_tasks(
            ctxt, raise_on_error=raise_on_error)

    def _get_influxdb_periodic_tasks(self, raise_on_error=False):
        ctxt = {}
        return self.influxdb_periodic_tasks.periodic_tasks(
            ctxt, raise_on_error=raise_on_error)

    def _spawn_prometheus_exporter(self):
        """Run the Flask metrics app in a daemon thread and return the thread."""
        t = threading.Thread(target=wsgi_app.run, kwargs={
            'host': CONF.openstack_deploy_manager.bind_host,
            'port': CONF.openstack_deploy_manager.bind_port
        })
        # Daemon so the exporter dies with the main process.
        t.daemon = True
        t.start()
        return t
'rabbitmq-svc-{0}'.format(name) self.init_cluster_data(cluster_name) self.svc_map[svc_name] = { 'selector': cluster_name, 'vhost': name, } if svc_name not in self.helm_resource_map: self.helm.install(svc_name, 'rabbitmq-svc') self.update_resource_map() for svc_name, svc in self.svc_map.items(): k8s_svc = self.k8s_svc_map[svc_name] node_port = None for port in k8s_svc.spec.ports: if port.name == 'rabbitmq': node_port = port.node_port break transport_url = 'rabbit:\\\\/\\\\/' for node in self.rabbitmq_nodes: node_ip = None for address in node.status.addresses: if address.type == 'InternalIP': node_ip = address.address break transport_url += "{0}:{1}@{2}:{3}\,".format( self.user, self.password, node_ip, node_port ) transport_url = transport_url[0:-2] + '\\\\/' + svc['vhost'] svc['transport_url'] = transport_url option = "--set selector={0},transport_url='{1}'".format(svc['selector'], svc['transport_url']) self.helm.upgrade(svc_name, 'rabbitmq-svc', option) def update_resource_map(self): self.helm_resource_map = {} self.k8s_svc_map = {} self.k8s_pods_map = {} self.rabbitmq_nodes = self.k8s_corev1api.list_node( label_selector=CONF.rabbitmq_manager.node_label_selector).items if len(self.rabbitmq_nodes) < 1: raise Exception('rabbitmq-nodes are not found') self.helm_resource_map = self.helm.get_resource_map() k8s_svcs = self.k8s_corev1api.list_namespaced_service( CONF.k8s.namespace).items k8s_pods = self.k8s_corev1api.list_namespaced_pod( CONF.k8s.namespace).items for k8s_pod in k8s_pods: app_label = k8s_pod.metadata.labels.get('app') if app_label is None: continue pods = self.k8s_pods_map.get(app_label, []) pods.append(k8s_pod) self.k8s_pods_map[app_label] = pods for k8s_svc in k8s_svcs: name = k8s_svc.metadata.name self.k8s_svc_map[name] = k8s_svc def periodic_tasks(self, context, raise_on_error=False): return self.run_periodic_tasks(context, raise_on_error=raise_on_error) @periodic_task.periodic_task(spacing=10) def check(self, context): LOG.info('Start check') 
self.update_resource_map() for name, cluster in self.cluster_map.items(): if name not in self.helm_resource_map: self.helm.install(name, 'rabbitmq-cluster') cluster['provisioning_status'] = STATUS_INSTALLED continue pods = 0 running_pods = 0 running_nodes = 0 unhealty_pods = 0 healty_pods = 0 failed_get_cluster_status = 0 partition_pods = 0 is_healty = True is_alert = False for pod in self.k8s_pods_map[name]: pods += 1 if not pod.status.phase == 'Running': continue condition_status = True for condition in pod.status.conditions: if condition.status != "True": condition_status = False if not condition_status: continue is_ready = True LOG.debug(pod.status) for cstatus in pod.status.container_statuses: if not cstatus.ready: is_ready = False break if not is_ready: unhealty_pods += 1 continue running_pods += 1 cluster_status = self.get_cluster_status(pod) if cluster_status is None: failed_get_cluster_status += 1 continue if cluster_status['is_partition']: partition_pods += 1 running_nodes += cluster_status['running_nodes'] if self.test_queue(pod): healty_pods += 1 if partition_pods > 0: is_alert = True if not is_alert: if unhealty_pods != 0: is_healty = False cluster['warning']['exists_unhealty_pods'] += unhealty_pods alert_threshold = CONF.rabbitmq_manager.wait_unhealty_pods_time / CONF.rabbitmq_manager.check_interval if cluster['provisioning_status'] < STATUS_ACTIVE: alert_threshold = alert_threshold * pods LOG.warning('Found unhealty_pods={0}, alert_threshold={1}'.format( cluster['warning']['exists_unhealty_pods'], alert_threshold )) if cluster['warning']['exists_unhealty_pods'] >= alert_threshold: is_alert = True else: cluster['warning']['exists_unhealty_pods'] = 0 standalone_pods = (running_pods * running_pods) - running_nodes if standalone_pods != 0: is_healty = False LOG.warning('Found standalone_pods') cluster['warning']['exists_standalone_nodes'] += 1 if cluster['warning']['exists_standalone_nodes'] >= 2: is_alert = True else: 
    def get_cluster_status(self, pod):
        """Parse ``rabbitmqctl cluster_status`` output from one pod.

        Runs the command via ``kubectl exec`` and scrapes the Erlang-term
        output by splitting on section markers.  Node names contain '@'
        ("rabbit@host"), so counting '@' in a section counts nodes.

        Returns:
            None when the command fails, otherwise a dict with
            ``running_nodes`` (int) and ``is_partition`` (bool).
        """
        pod_name = pod.metadata.name
        cluster_status = util.execute('kubectl exec -n {0} {1} rabbitmqctl cluster_status'.format(
            CONF.k8s.namespace, pod_name), enable_exception=False)
        if cluster_status['return_code'] != 0:
            return None
        # Everything after '{nodes' then after '{running_nodes,' is the
        # running-nodes section followed by the rest of the status term.
        splited_msg = cluster_status['stdout'].split('{nodes', 1)
        splited_msg = splited_msg[1].split('{running_nodes,', 1)
        # Newer rabbitmq emits a '{cluster_name,...}' section between
        # running_nodes and partitions; handle both layouts.
        tmp_splited_msg = splited_msg[1].split('{cluster_name,', 1)
        if len(tmp_splited_msg) == 2:
            running_nodes = tmp_splited_msg[0]
            splited_msg = tmp_splited_msg[1].split('{partitions,', 1)
        else:
            splited_msg = splited_msg[1].split('{partitions,', 1)
            running_nodes = splited_msg[0]
        # Likewise '{alarms,...}' may or may not follow the partitions list.
        tmp_splited_msg = splited_msg[1].split('{alarms,', 1)
        if len(tmp_splited_msg) == 2:
            partitions = tmp_splited_msg[0]
        else:
            splited_msg = splited_msg[1].split('}]', 1)
            partitions = splited_msg[0]
        running_nodes_count = running_nodes.count('@')
        partitions_count = partitions.count('@')
        return {
            'running_nodes': running_nodes_count,
            'is_partition': (partitions_count > 0),
        }
    def init_cluster_data(self, name):
        """Register cluster *name* in self.cluster_map with a clean slate.

        provisioning_status starts at STATUS_NOT_INSTALLED; the warning
        counters accumulate consecutive failures until check() raises an
        alert, and are reset to zero when the condition clears.
        """
        self.cluster_map[name] = {
            'provisioning_status': STATUS_NOT_INSTALLED,
            'warning': {
                # NOTE: the 'unhealty' spelling is a load-bearing dict key
                # also used by check(); do not correct it in isolation.
                'exists_unhealty_pods': 0,
                'exists_standalone_nodes': 0,
                'failed_get_cluster_status': 0,
            }
        }
class Algo:
    """State for one mineable algorithm known to the switcher."""

    def __init__(self, name):
        self.name = name
        # Shell command run to (re)start the miner for this algorithm;
        # filled in from the [Scripts] section of the config file.
        self.command = ''
        # Round counter, initialised to zero.
        self.cnt = 0
    def switch_algo(self, algo):
        """Tells the current miner to exit and start the other one.

        Records *algo* as current, asks the running miner to quit via its
        API, waits ``switchtime`` seconds for it to release the GPU, then
        launches the configured startup script.  Exits the whole program if
        the script cannot be executed.
        """
        self.logger.info('=> Switching to %s (running %s)' % (algo, self.algos[algo].command))
        self.current_algo = algo
        try:
            self.cgminer.quit()
            time.sleep(self.switchtime)  # Wait for it to quit / Or check the process id?
        except socket.error:
            pass  # Cgminer not running
        try:
            subprocess.Popen(self.algos[algo].command)
        except OSError:
            self.logger.critical('Cannot execute [%s]!' % self.algos[algo].command)
            self.logger.critical('Make sure your miner startup scripts are executable before continuing.')
            sys.exit()
print "Make sure the tmb-switcher.conf file is present!" sys.exit() # Load the config file config = ConfigParser.ConfigParser() config.read(config_file) # Read the logging settings and setup the logger logging_config = dict(config.items('Logging')) self.__prepare_logger(logging_config) # Read the settings or use default values try: self.api_key = config.get('TradeMyBit', 'apikey') except: self.logger.critical("Could not read apikey from config file") sys.exit() try: self.idletime = config.getint('Misc','idletime') except: self.logger.warning("Could not read idletime from config file. Defaulting to 5 min") self.idletime = 5 try: self.switchtime = config.getint('Misc', 'switchtime') except: self.logger.warning("Could not read switchtime from config file. Defaulting to 1s") self.switchtime = 1 try: self.profitability_threshold = config.getfloat('Misc','profitability_threshold') except: self.logger.warning("Could not read profitability_threshold from config file. Defaulting to 10%") self.profitability_threshold = 0.1 try: self.cgminer_host = config.get('cgminer', 'host') except: self.logger.warning("Could not read cgminer host from config file. Defaulting to 127.0.0.1") self.cgminer_host = '127.0.0.1' try: self.cgminer_port = config.getint('cgminer', 'port') except: self.logger.warning("Could not read cgminer port from config file. Defaulting to 4028") self.cgminer_host = 4028 for key in dict(config.items('Algorithms')): try: if config.get('Algorithms', key) =="true": self.logger.debug(key+" Enabled!") self.algos[key] = Algo(key) script = config.get('Scripts', key) if os.path.isfile(script): self.algos[key].command = script else: self.logger.critical('Script for %s not found!' % key) self.cleanup() except ConfigParser.NoOptionError : self.logger.warning('Script for %s not configured!' 
% key) continue # try: csv_file = logging_config.get('profitability_log') if csv_file: self.__prepare_profitability_log(csv_file) # except: # self.logger.warning("Could not configure profitability logging. Disabling profitability log!") # self.profitability_log = None def main(): switcher = TradeMyBitSwitcher() switcher.main() if __name__ == '__main__': main()
#!/usr/bin/python # # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Adds an ad customizer feed. Associates the feed with customer and adds an ad that uses the feed to populate dynamic data. Tags: CustomerFeedService.mutate, FeedItemService.mutate Tags: FeedMappingService.mutate, FeedService.mutate Tags: AdGroupAdService.mutate """ __author__ = ('api.msaniscalchi@gmail.com (Mark Saniscalchi)', 'yufeng.dev@gmail.com (Yufeng Guo)') # Import appropriate classes from the client library. from googleads import adwords # See the Placeholder reference page for a list of all the placeholder types # and fields: # https://developers.google.com/adwords/api/docs/appendix/placeholders PLACEHOLDER_AD_CUSTOMIZER = '10' PLACEHOLDER_FIELD_INTEGER = '1' PLACEHOLDER_FIELD_FLOAT = '2' PLACEHOLDER_FIELD_PRICE = '3' PLACEHOLDER_FIELD_DATE = '4' PLACEHOLDER_FIELD_STRING = '5' ADGROUPS = [ 'INSERT_ADGROUP_ID_HERE', 'INSERT_ADGROUP_ID_HERE' ] FEEDNAME = 'INSERT_FEED_NAME_HERE' def main(client, adgroups): # Initialize appropriate services. ad_group_ad_service = client.GetService('AdGroupAdService', version='v201406') customer_feed_service = client.GetService( 'CustomerFeedService', version='v201406') feed_item_service = client.GetService('FeedItemService', version='v201406') feed_mapping_service = client.GetService( 'FeedMappingService', version='v201406') feed_service = client.GetService('FeedService', version='v201406') # First, create a customizer feed. 
One feed per account can be used for all # ads. customizer_feed = { 'name': FEEDNAME, 'attributes': [ {'type': 'STRING', 'name': 'Name'}, {'type': 'STRING', 'name': 'Price'}, {'type': 'DATE_TIME', 'name': 'Date'} ] } feed_service_operation = { 'operator': 'ADD', 'operand': customizer_feed } response = feed_service.mutate([feed_service_operation]) if response and 'value' in response: feed = response['value'][0] feed_data = { 'feedId': feed['id'], 'nameId': feed['attributes'][0]['id'], 'priceId': feed['attributes'][1]['id'], 'dateId': feed['attributes'][2]['id'] } print ('Feed with name \'%s\' and ID %s was added with:' '\tName attribute ID %s and price attribute ID %s and date attribute' 'ID %s') % (feed['name'], feed['id'], feed_data['nameId'], feed_data['priceId'], feed_data['dateId']) else: raise Exception('No feeds were added') # Creating feed mapping to map the fields with customizer IDs. feed_mapping = { 'placeholderType': PLACEHOLDER_AD_CUSTOMIZER, 'feedId': feed_data['feedId'], 'attributeFieldMappings': [ { 'feedAttributeId': feed_data['nameId'], 'fieldId': PLACEHOLDER_FIELD_STRING }, { 'feedAttributeId': feed_data['priceId'], 'fieldId': PLACEHOLDER_FIELD_PRICE }, { 'feedAttributeId': feed_data['dateId'], 'fieldId': PLACEHOLDER_FIELD_DATE } ] } feed_mapping_operation = { 'operator': 'ADD', 'operand': feed_mapping } response = feed_mapping_service.mutate([feed_mapping_operation]) if response and 'value' in response: feed_mapping = response['value'][0] print ('Feed mapping with ID %s and placeholder type %s was saved for feed' ' with ID %s.') % (feed_mapping['feedMappingId'], feed_mapping['placeholderType'], feed_mapping['feedId']) else: raise Exception('No feed mappings were added.') # Now adding feed items -- the values we'd like to place. 
items_data = [ { 'name': 'Mars', 'price': '$1234.56', 'date': '20140601 000000', 'adGroupId': adgroups[0] }, { 'name': 'Venus', 'price': '$1450.00', 'date': '20140615 120000', 'adGroupId': adgroups[1] } ] feed_items = [{'feedId': feed_data['feedId'], 'adGroupTargeting': { 'TargetingAdGroupId': item['adGroupId'] }, 'attributeValues': [ { 'feedAttributeId': feed_data['nameId'], 'stringValue': item['name'] }, { 'feedAttributeId': feed_data['priceId'], 'stringValue': item['price'] }, { 'feedAttributeId': feed_data['dateId'], 'stringValue': item['date'] } ]} for item in items_data] feed_item_operations = [{ 'operator': 'ADD', 'operand': feed_item } for feed_item in feed_items] response = feed_item_service.mutate(feed_item_operations) if response and 'value' in response: for feed_item in response['value']: print 'Feed item with ID %s was added.' % feed_item['feedItemId'] else: raise Exception('No feed items were added.') # Finally, creating a customer (account-level) feed with a matching function # that determines when to use this feed. For this case we use the "IDENTITY" # matching function that is always 'true' just to associate this feed with # the customer. The targeting is done within the feed items using the # :campaign_targeting, :ad_group_targeting, or :keyword_targeting attributes. matching_function = { 'operator': 'IDENTITY', 'lhsOperand': [ { 'xsi_type': 'ConstantOperand', 'type': 'BOOLEAN', 'booleanValue': 'true' } ] } customer_feed = { 'feedId': feed_data['feedId'], 'matchingFunction': matching_function, 'placeholderTypes': [PLACEHOLDER_AD_CUSTOMIZER] } customer_feed_operation = { 'operator': 'ADD', 'operand': customer_feed } response = customer_feed_service.mutate([customer_feed_operation]) if response and 'value' in response: feed = response['value'][0] print 'Customer feed with ID %s was added.' % feed['feedId'] else: raise Exception('No customer feeds were added.') # All set! We can now create ads with customizations. 
text_ad = { 'xsi_type': 'TextAd', 'headline': 'Luxury Cruise to {=%s.Name}' % FEEDNAME, 'description1': 'Only {=%s.Price}' % FEEDNAME, 'description2': 'Offer ends in {=countdown(%s.Date)}!' % FEEDNAME, 'url': 'http://www.example.com', 'displayUrl': 'www.example.com' } # We add the same ad to both ad groups. When they serve, they will show # different values, since they match different feed items. operations = [{ 'operator': 'ADD', 'operand': { 'adGroupId': adgroup, 'ad': text_ad } } for adgroup in adgroups] print operations response = ad_group_ad_service.mutate(operations) print '===ad group ad service===' print response if response and 'value' in response: for ad in response['value']: print ('\tCreated an ad with ID \'%s\', type \'%s\', and status \'%s\'.' % (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status'])) else: raise Exception('No ads were added.') if __name__ == '__main__': # Initialize client object. adwords_client = adwords.AdWordsClient.LoadFromStorage() main(adwords_client, ADGROUPS)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
21a.py
~~~~~~

Advent of Code 2017 - Day 21: Fractal Art

Part One

You find a program trying to generate some art. It uses a strange process
that involves repeatedly enhancing the detail of an image through a set of
rules.

The image consists of a two-dimensional square grid of pixels that are
either on (#) or off (.). The program always begins with this pattern:

.#.
..#
###

Because the pattern is both 3 pixels wide and 3 pixels tall, it is said to
have a size of 3.

Then, the program repeats the following process:

If the size is evenly divisible by 2, break the pixels up into 2x2 squares,
and convert each 2x2 square into a 3x3 square by following the
corresponding enhancement rule.

Otherwise, the size is evenly divisible by 3; break the pixels up into 3x3
squares, and convert each 3x3 square into a 4x4 square by following the
corresponding enhancement rule.

Because each square of pixels is replaced by a larger one, the image gains
pixels and so its size increases.

The artist's book of enhancement rules is nearby (your puzzle input);
however, it seems to be missing rules. The artist explains that sometimes,
one must rotate or flip the input pattern to find a match. (Never rotate or
flip the output pattern, though.)

Each pattern is written concisely: rows are listed as single units, ordered
top-down, and separated by slashes. For example, the following rules
correspond to the adjacent patterns:

../.# = ..
        .#

               .#.
.#./..#/### =  ..#
               ###

                       #..#
#..#/..../#..#/.##. =  ....
                       #..#
                       .##.

When searching for a rule to use, rotate and flip the pattern as necessary.
For example, all of the following patterns match the same rule:

.#.   .#.   #..   ###
..#   #..   #.#   ..#
###   ###   ##.   .#.

Suppose the book contained the following two rules:

../.# => ##./#../...
.#./..#/### => #..#/..../..../#..#

As before, the program begins with this pattern:

.#.
..#
###

The size of the grid (3) is not divisible by 2, but it is divisible by 3.
It divides evenly into a single square; the square matches the second rule,
which produces:

#..#
....
....
#..#

The size of this enhanced grid (4) is evenly divisible by 2, so that rule
is used. It divides evenly into four squares:

#.|.#
..|..
--+--
..|..
#.|.#

Each of these squares matches the same rule (../.# => ##./#../...), three
of which require some flipping and rotation to line up with the rule. The
output for the rule is the same in all four cases:

##.|##.
#..|#..
...|...
---+---
##.|##.
#..|#..
...|...

Finally, the squares are joined into a new grid:

##.##.
#..#..
......
##.##.
#..#..
......

Thus, after 2 iterations, the grid contains 12 pixels that are on.

How many pixels stay on after 5 iterations?

:copyright: (c) 2017 by Martin Bor.
:license: MIT, see LICENSE for more details.
"""

import sys
import math
from typing import Dict, List, Tuple, Iterator

Grid = Tuple[Tuple[str, ...], ...]


def transform(grid: Grid, rules: Dict[Grid, Grid]) -> Grid:
    """Transform a (2x2 or 3x3) grid given the ruleset.

    A grid matches if any of its rotations and mirrored rotations matches a
    rule.

    :grid: 2x2 or 3x3 grid to transform
    :rules: dict of transformation rules
    :returns: transformed grid
    :raises ValueError: if no rule matches any orientation of the grid

    >>> grid = (('#', '.'), ('#', '.'))
    >>> rules = {(('#', '.'), ('#', '.')):
    ...          (('#', '#', '#'), ('.', '.', '.'), ('#', '.', '#'))}
    >>> transform(grid, rules)
    (('#', '#', '#'), ('.', '.', '.'), ('#', '.', '#'))
    """
    # The 4 rotations plus the 4 rotations of the transpose cover all 8
    # orientations of the square.
    for _ in range(2):
        for _ in range(4):
            if grid in rules:
                return rules[grid]
            grid = rotate(grid)
        grid = transpose(grid)
    # Explicit raise instead of `assert False`: asserts vanish under -O.
    raise ValueError('no enhancement rule matches %s' % to_pattern(grid))


def to_pattern(grid: Grid) -> str:
    """Convert grid into pattern.

    :grid: grid to convert
    :returns: grid in pattern form

    >>> to_pattern((('#', '#', '#'), ('.', '.', '.'), ('#', '.', '#')))
    '###/.../#.#'
    """
    return '/'.join(''.join(row) for row in grid)


def rotate(grid: Grid) -> Grid:
    """Rotate grid clockwise

    :grid: grid to rotate clockwise
    :returns: rotated grid

    >>> rotate(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
    ((7, 4, 1), (8, 5, 2), (9, 6, 3))
    """
    return tuple(zip(*grid[::-1]))


def transpose(grid: Grid) -> Grid:
    """Transpose (flip) grid along the diagonal

    :grid: grid to flip
    :returns: grid flipped along the diagonal

    >>> transpose(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
    ((1, 4, 7), (2, 5, 8), (3, 6, 9))
    """
    return tuple(zip(*grid))


def to_grid(pattern: str) -> Grid:
    """Convert a pattern into a grid

    :pattern: pattern to convert
    :returns: grid

    >>> to_grid('..#/##./#.#')
    (('.', '.', '#'), ('#', '#', '.'), ('#', '.', '#'))
    """
    return tuple(tuple(row) for row in pattern.split('/'))


def divide(grid: Grid) -> Iterator[Grid]:
    """Divide grid into 2x2 or 3x3 tiles

    :grid: NxN grid
    :returns: generator that yields 2x2 or 3x3 tiles, depending on whether
              the grid is divisible by 2 or 3.

    >>> g = divide((
    ...     ('#', '.', '.', '.'),
    ...     ('.', '.', '.', '.'),
    ...     ('#', '.', '#', '.'),
    ...     ('#', '#', '#', '#')))
    >>> next(g)
    (('#', '.'), ('.', '.'))
    >>> next(g)
    (('.', '.'), ('.', '.'))
    >>> next(g)
    (('#', '.'), ('#', '#'))
    >>> next(g)
    (('#', '.'), ('#', '#'))
    """
    step = 2 if len(grid) % 2 == 0 else 3
    for y in range(0, len(grid), step):
        for x in range(0, len(grid), step):
            yield tuple(tuple(grid[r][x:x + step])
                        for r in range(y, y + step))


def merge(tgrid: List[Grid]) -> Grid:
    """Combine a tiled grid into a single grid

    >>> merge([
    ...     (('#', '.'), ('.', '.')),
    ...     (('.', '.'), ('.', '.')),
    ...     (('#', '.'), ('#', '#')),
    ...     (('#', '.'), ('#', '#'))])
    (('#', '.', '.', '.'), ('.', '.', '.', '.'), ('#', '.', '#', '.'), \
('#', '#', '#', '#'))
    """
    # The tiles form a square: sqrt(len) tiles per row.
    tiles_per_row = int(math.sqrt(len(tgrid)))
    grid = ()
    for tile_offset in range(0, tiles_per_row):
        for row in range(0, len(tgrid[0])):
            r = ()
            for tile in range(0, tiles_per_row):
                r += tgrid[tile + tile_offset * tiles_per_row][row]
            grid += (r,)
    return grid


def show(grid: Grid):
    """Pretty print a grid"""
    print('\n'.join(''.join(row) for row in grid))


def parse(rulebook: str) -> Dict[Grid, Grid]:
    """Parse rulebook.

    :rulebook: list of transformations
    :returns: dict with grid to grid transforms

    >>> parse('''../.# => ##./#../...
    ... .#./..#/### => #..#/..../..../#..#''')  # doctest: +ELLIPSIS
    {(('.', '.'), ('.', '#')): (('#', '#', '.'), ('#', '.', '.'), ...}
    """
    rules = {}
    for line in rulebook.strip().split('\n'):
        src, dst = map(to_grid, line.strip().split(' => '))
        rules[src] = dst
    return rules


def solve(rulebook, iterations=5):
    """Count number of on pixels after ``iterations`` enhancement rounds.

    :rulebook: list of enhancement rules
    :iterations: number of enhancement rounds to apply (default 5,
                 backward compatible with the original behaviour)
    :returns: number of on pixels after ``iterations`` iterations
    """
    rules = parse(rulebook)
    # start pattern
    grid = to_grid('.#./..#/###')
    for _ in range(iterations):
        grid = merge([transform(tile, rules) for tile in divide(grid)])
    return to_pattern(grid).count('#')


def main(argv):
    # Read the puzzle input from a file argument or stdin; the file is
    # closed deterministically via the context manager (the original
    # leaked the handle).
    if len(argv) == 2:
        with open(argv[1], 'r') as f:
            puzzle = f.read()
    else:
        sys.stderr.write('reading from stdin...\n')
        puzzle = sys.stdin.read()
    print(solve(puzzle.strip()))


if __name__ == "__main__":
    sys.exit(main(sys.argv))
""" This module provides the Scan Op Scanning is a general form of recurrence, which can be used for looping. The idea is that you *scan* a function along some input sequence, producing an output at each time-step that can be seen (but not modified) by the function at the next time-step. (Technically, the function can see the previous K time-steps of your outputs and L time steps (from past and future) of your inputs. So for example, ``sum()`` could be computed by scanning the ``z+x_i`` function over a list, given an initial state of ``z=0``. Special cases: * A *reduce* operation can be performed by using only the last output of a ``scan``. * A *map* operation can be performed by applying a function that ignores previous steps of the outputs. Often a for-loop or while-loop can be expressed as a ``scan()`` operation, and ``scan`` is the closest that theano comes to looping. The advantages of using ``scan`` over `for` loops in python (amongs other) are: * it allows the number of iterations to be part of the symbolic graph * it allows computing gradients through the for loop * there exist a bunch of optimizations that help re-write your loop such that less memory is used and that it runs faster * it ensures that data is not copied from host to gpu and gpu to host at each step The Scan Op should typically be used by calling any of the following functions: ``scan()``, ``map()``, ``reduce()``, ``foldl()``, ``foldr()``. 
""" __docformat__ = 'restructedtext en' __authors__ = ("Razvan Pascanu " "Frederic Bastien " "James Bergstra " "Pascal Lamblin ") __copyright__ = "(c) 2010, Universite de Montreal" __contact__ = "Razvan Pascanu <r.pascanu@gmail>" import logging import numpy from six.moves import xrange from theano import gof from theano.compat import izip from theano.tensor import opt, TensorVariable from theano.tensor.sharedvar import TensorSharedVariable from theano import tensor from theano.scalar.sharedvar import shared as scalar_shared from theano.compile.pfunc import rebuild_collect_shared from . import scan_op from . import scan_utils # Logging function for sending warning or info _logger = logging.getLogger('theano.scan_module.scan') def scan(fn, sequences=None, outputs_info=None, non_sequences=None, n_steps=None, truncate_gradient=-1, go_backwards=False, mode=None, name=None, options=None, profile=False): """ This function constructs and applies a Scan op to the provided arguments. :param fn: ``fn`` is a function that describes the operations involved in one step of ``scan``. ``fn`` should construct variables describing the output of one iteration step. It should expect as input theano variables representing all the slices of the input sequences and previous values of the outputs, as well as all other arguments given to scan as ``non_sequences``. The order in which scan passes these variables to ``fn`` is the following : * all time slices of the first sequence * all time slices of the second sequence * ... * all time slices of the last sequence * all past slices of the first output * all past slices of the second otuput * ... * all past slices of the last output * all other arguments (the list given as `non_sequences` to scan) The order of the sequences is the same as the one in the list `sequences` given to scan. The order of the outputs is the same as the order of ``outputs_info``. 
For any sequence or output the order of the time slices is the same as the one in which they have been given as taps. For example if one writes the following : .. code-block:: python scan(fn, sequences = [ dict(input= Sequence1, taps = [-3,2,-1]) , Sequence2 , dict(input = Sequence3, taps = 3) ] , outputs_info = [ dict(initial = Output1, taps = [-3,-5]) , dict(initial = Output2, taps = None) , Output3 ] , non_sequences = [ Argument1, Argument 2]) ``fn`` should expect the following arguments in this given order: #. ``Sequence1[t-3]`` #. ``Sequence1[t+2]`` #. ``Sequence1[t-1]`` #. ``Sequence2[t]`` #. ``Sequence3[t+3]`` #. ``Output1[t-3]`` #. ``Output1[t-5]`` #. ``Output3[t-1]`` #. ``Argument1`` #. ``Argument2`` The list of ``non_sequences`` can also contain shared variables used in the function, though ``scan`` is able to figure those out on its own so they can be skipped. For the clarity of the code we recommend though to provide them to scan. To some extend ``scan`` can also figure out other ``non sequences`` (not shared) even if not passed to scan (but used by `fn`). A simple example of this would be : .. code-block:: python import theano.tensor as TT W = TT.matrix() W_2 = W**2 def f(x): return TT.dot(x,W_2) The function is expected to return two things. One is a list of outputs ordered in the same order as ``outputs_info``, with the difference that there should be only one output variable per output initial state (even if no tap value is used). Secondly `fn` should return an update dictionary (that tells how to update any shared variable after each iteration step). The dictionary can optionally be given as a list of tuples. There is no constraint on the order of these two list, ``fn`` can return either ``(outputs_list, update_dictionary)`` or ``(update_dictionary, outputs_list)`` or just one of the two (in case the other is empty). To use ``scan`` as a while loop, the user needs to change the function ``fn`` such that also a stopping condition is returned. 
To do so, he/she needs to wrap the condition in an ``until`` class.
        The condition should be returned as a third element, for example:

        .. code-block:: python

            ...
            return [y1_t, y2_t], {x:x+1}, theano.scan_module.until(x < 50)

        Note that a number of steps (considered in here as the maximum
        number of steps) is still required even though a condition is
        passed (and it is used to allocate memory if needed).

    :param sequences:
        ``sequences`` is the list of Theano variables or dictionaries
        describing the sequences ``scan`` has to iterate over. If a
        sequence is given as wrapped in a dictionary, then a set of
        optional information can be provided about the sequence. The
        dictionary should have the following keys:

        * ``input`` (*mandatory*) -- Theano variable representing the
          sequence.

        * ``taps`` -- Temporal taps of the sequence required by ``fn``.
          They are provided as a list of integers, where a value ``k``
          implies that at iteration step ``t`` scan will pass to ``fn``
          the slice ``t+k``. Default value is ``[0]``

        Any Theano variable in the list ``sequences`` is automatically
        wrapped into a dictionary where ``taps`` is set to ``[0]``

    :param outputs_info:
        ``outputs_info`` is the list of Theano variables or dictionaries
        describing the initial state of the outputs computed
        recurrently. When the initial states are given as dictionaries,
        optional information can be provided about the output
        corresponding to those initial states. The dictionary should
        have the following keys:

        * ``initial`` -- Theano variable that represents the initial
          state of a given output. In case the output is not computed
          recursively (think of a map) and does not require an initial
          state this field can be skipped. Given that only the previous
          time step of the output is used by ``fn``, the initial state
          should have the same shape as the output. If multiple time
          taps are used, the initial state should have one extra
          dimension that should cover all the possible taps.
For example if we use ``-5``, ``-2`` and ``-1`` as past taps, at step 0, ``fn`` will require (by an abuse of notation) ``output[-5]``, ``output[-2]`` and ``output[-1]``. This will be given by the initial state, which in this case should have the shape (5,)+output.shape. If this variable containing the initial state is called ``init_y`` then ``init_y[0]`` *corresponds to* ``output[-5]``. ``init_y[1]`` *correponds to* ``output[-4]``, ``init_y[2]`` corresponds to ``output[-3]``, ``init_y[3]`` coresponds to ``output[-2]``, ``init_y[4]`` corresponds to ``output[-1]``. While this order might seem strange, it comes natural from splitting an array at a given point. Assume that we have a array ``x``, and we choose ``k`` to be time step ``0``. Then our initial state would be ``x[:k]``, while the output will be ``x[k:]``. Looking at this split, elements in ``x[:k]`` are ordered exactly like those in ``init_y``. * ``taps`` -- Temporal taps of the output that will be pass to ``fn``. They are provided as a list of *negative* integers, where a value ``k`` implies that at iteration step ``t`` scan will pass to ``fn`` the slice ``t+k``. ``scan`` will follow this logic if partial information is given: * If an output is not wrapped in a dictionary, ``scan`` will wrap it in one assuming that you use only the last step of the output (i.e. it makes your tap value list equal to [-1]). * If you wrap an output in a dictionary and you do not provide any taps but you provide an initial state it will assume that you are using only a tap value of -1. * If you wrap an output in a dictionary but you do not provide any initial state, it assumes that you are not using any form of taps. * If you provide a ``None`` instead of a variable or a empty dictionary ``scan`` assumes that you will not use any taps for this output (like for example in case of a map) If ``outputs_info`` is an empty list or None, ``scan`` assumes that no tap is used for any of the outputs. 
If information is provided just for a subset of the outputs an exception is raised (because there is no convention on how scan should map the provided information to the outputs of ``fn``) :param non_sequences: ``non_sequences`` is the list of arguments that are passed to ``fn`` at each steps. One can opt to exclude variable used in ``fn`` from this list as long as they are part of the computational graph, though for clarity we encourage not to do so. :param n_steps: ``n_steps`` is the number of steps to iterate given as an int or Theano scalar. If any of the input sequences do not have enough elements, scan will raise an error. If the *value is 0* the outputs will have *0 rows*. If the value is negative, ``scan`` will run backwards in time. If the ``go_backwards`` flag is already set and also ``n_steps`` is negative, ``scan`` will run forward in time. If n stpes is not provided, ``scan`` will figure out the amount of steps it should run given its input sequences. :param truncate_gradient: ``truncate_gradient`` is the number of steps to use in truncated BPTT. If you compute gradients through a scan op, they are computed using backpropagation through time. By providing a different value then -1, you choose to use truncated BPTT instead of classical BPTT, where you go for only ``truncate_gradient`` number of steps back in time. :param go_backwards: ``go_backwards`` is a flag indicating if ``scan`` should go backwards through the sequences. If you think of each sequence as indexed by time, making this flag True would mean that ``scan`` goes back in time, namely that for any sequence it starts from the end and goes towards 0. :param name: When profiling ``scan``, it is crucial to provide a name for any instance of ``scan``. The profiler will produce an overall profile of your code as well as profiles for the computation of one step of each instance of ``scan``. The ``name`` of the instance appears in those profiles and can greatly help to disambiguate information. 
:param mode: It is recommended to leave this argument to None, especially when profiling ``scan`` (otherwise the results are not going to be accurate). If you prefer the computations of one step of ``scan`` to be done differently then the entire function, you can use this parameter to describe how the computations in this loop are done (see ``theano.function`` for details about possible values and their meaning). :param profile: Flag or string. If true, or different from the empty string, a profile object will be created and attached to the inner graph of scan. In case ``profile`` is True, the profile object will have the name of the scan instance, otherwise it will have the passed string. Profile object collect (and print) information only when running the inner graph with the new cvm linker ( with default modes, other linkers this argument is useless) :rtype: tuple :return: tuple of the form (outputs, updates); ``outputs`` is either a Theano variable or a list of Theano variables representing the outputs of ``scan`` (in the same order as in ``outputs_info``). ``updates`` is a subclass of dictionary specifying the update rules for all shared variables used in scan This dictionary should be passed to ``theano.function`` when you compile your function. The change compared to a normal dictionary is that we validate that keys are SharedVariable and addition of those dictionary are validated to be consistent. 
""" # Note : see the internal documentation of the scan op for naming # conventions and all other details if options is None: options = {} rvals = scan_utils.canonical_arguments(sequences, outputs_info, non_sequences, go_backwards, n_steps) inputs, states_and_outputs_info, parameters, T = rvals # If we provided a known number of steps ( before compilation) # and if that number is 1 or -1, then we can skip the Scan Op, # and just apply the inner function once # To do that we check here to see the nature of n_steps T_value = None if isinstance(n_steps, (float, int)): T_value = int(n_steps) else: try: T_value = opt.get_scalar_constant_value(n_steps) except (TypeError, AttributeError): T_value = None if T_value in (1, -1): return one_step_scan(fn, inputs, states_and_outputs_info, parameters, truncate_gradient) # 1. Variable representing the current time step t = scalar_shared(numpy.int64(0), name='t') # 2. Allocate memory for the states of scan. mintaps = [] lengths = [] for pos, arg_info in enumerate(states_and_outputs_info): if arg_info.get('taps', None) == [-1]: mintaps.append(1) lengths.append(scalar_shared(numpy.int64(0), name='l%d' % pos)) arg_info['initial'] = scan_utils.expand(tensor.unbroadcast( tensor.shape_padleft(arg_info['initial']), 0), T) elif arg_info.get('taps', None): if numpy.any(numpy.array(arg_info.get('taps', [])) > 0): # Make sure we do not have requests for future values of a # sequence we can not provide such values raise ValueError('Can not use future taps of outputs', arg_info) mintap = abs(numpy.min(arg_info['taps'])) lengths.append(scalar_shared(numpy.int64(0), name='l%d' % pos)) mintaps.append(mintap) arg_info['initial'] = scan_utils.expand( arg_info['initial'][:mintap], T) else: mintaps.append(0) lengths.append(scalar_shared(numpy.int64(0), name='l%d' % pos)) # 3. Generate arguments for the function passed to scan. 
This will # function will return the outputs that need to be computed at every # timesteps inputs_slices = [input[t] for input in inputs] states_slices = [] for n, state in enumerate(states_and_outputs_info): # Check if it is actually a state and not an output if mintaps[n] != 0: for k in state['taps']: states_slices.append( state['initial'][(t + mintaps[n] + k) % lengths[n]]) # 4. Construct outputs that are to be computed by the inner # function of scan args = inputs_slices + states_slices + parameters cond, states_and_outputs, updates = \ scan_utils.get_updates_and_outputs(fn(*args)) # User is allowed to provide no information if it only behaves like a # map if (len(states_and_outputs) != len(states_and_outputs_info) and len(states_and_outputs_info) == 0): mintaps = [0] * len(states_and_outputs) # 5. Construct the scan op # 5.1 Construct list of shared variables with updates (those that # can be treated as states (i.e. of TensorType) and those that can not # (like Random States) if cond is not None: _cond = [cond] else: _cond = [] rvals = rebuild_collect_shared( states_and_outputs + _cond, updates=updates, rebuild_strict=True, copy_inputs_over=True, no_default_updates=False) # extracting the arguments input_variables, cloned_outputs, other_rval = rvals clone_d, update_d, update_expr, shared_inputs = other_rval additional_input_states = [] additional_output_states = [] additional_lengths = [] additional_mintaps = [] original_numeric_shared_variables = [] non_numeric_input_states = [] non_numeric_output_states = [] original_non_numeric_shared_variables = [] pos = len(lengths) for sv in shared_inputs: if sv in update_d: if isinstance(sv, (TensorVariable, TensorSharedVariable)): # We can treat it as a sit sot nw_state = scan_utils.expand( tensor.unbroadcast(tensor.shape_padleft(sv), 0), T) additional_lengths.append(scalar_shared(numpy.int64(0), name='l%d' % pos)) pos = pos + 1 additional_mintaps.append(1) additional_input_states.append(nw_state) 
additional_output_states.append( scan_utils.clone(tensor.set_subtensor( nw_state[(t + 1) % additional_lengths[-1]], update_d[sv]))) original_numeric_shared_variables.append(sv) else: non_numeric_input_states.append(sv) non_numeric_output_states.append(update_d[sv]) original_non_numeric_shared_variables.append(sv) # Replace shared variables in the update _additional_output_states = [] replace = {} for sv, buf in zip(original_numeric_shared_variables, additional_input_states): replace[sv] = buf[t] for out in additional_output_states: _additional_output_states.append( scan_utils.clone(out, replace=replace)) additional_output_states = _additional_output_states # 5.2 Collect inputs/outputs of the inner function inputs = [] outputs = [] for n, mintap in enumerate(mintaps): if mintap != 0: input_state = states_and_outputs_info[n]['initial'] inputs.append(input_state) outputs.append( tensor.set_subtensor( input_state[(t + mintap) % lengths[n]], states_and_outputs[n])) else: mem_buffer = scan_utils.allocate_memory( T, states_and_outputs_info[n], states_and_outputs[n]) inputs.append(output) outputs.append( tensor.set_subtensor(output[t % lengths[n]], states_and_outputs[n])) inputs.extend(additional_input_states) outputs.extend(additional_output_states) lengths.extend(additional_lengths) mintaps.extend(additional_mintaps) inputs.extend(non_numeric_input_states) outputs.extend(non_numeric_output_states) all_other_inputs = gof.graph.inputs(outputs) parameters = [x for x in all_other_inputs if (x not in inputs and x not in lengths and x is not t and isinstance(x, gof.Variable) and not isinstance(x, gof.Constant))] inputs.extend(parameters) # 5.3 Construct the the options dictionary options['name'] = name options['profile'] = profile options['mode'] = mode options['inplace'] = False options['gpu'] = False options['truncate_gradient'] = truncate_gradient options['hash_inner_graph'] = 0 # 5.4 Construct the ScanOp instance local_op = scan_op.ScanOp(inputs=inputs, outputs=outputs, 
                              lengths=lengths,
                              switches=[],
                              mintaps=mintaps,
                              index=t,
                              options=options,
                              as_repeatUntil=cond)
    # Note that we get here all the outputs followed by the update rules to
    # the shared variables we had in our scan
    # we know that we have (in this given order):
    #    * len(states_and_outputs) real outputs
    #    * len(additional_input_states) updates for numeric shared variable
    #    * len(non_numeric_input_states) updates for non numeric shared
    #    variables
    scan_inputs = [T] + inputs
    scan_outputs_update_rules = scan_utils.to_list(local_op(*scan_inputs))

    # 5.5 Collect outputs and add permutation object
    # NOTE(review): ScanPermutation appears to rotate the circular buffer so
    # that entries are in chronological order before the initial taps are
    # sliced off — confirm against scan_utils.ScanPermutation.
    scan_outputs = []
    for pos in xrange(len(states_and_outputs)):
        out = scan_utils.ScanPermutation(mintaps[pos])(
            scan_outputs_update_rules[pos], t)
        scan_outputs.append(out[mintaps[pos]:])

    # 5.6 Construct updates dictionary
    # Outputs come first in scan_outputs_update_rules; everything after them
    # is an update rule, ordered as described in the comment above.
    update_rules = scan_outputs_update_rules[len(states_and_outputs):]
    updates = {}
    for v, u in izip(original_numeric_shared_variables,
                     update_rules[:len(additional_input_states)]):
        # Numeric shared variables were expanded into buffers; the new value
        # of the variable is the last entry written into its buffer.
        updates[v] = u[-1]
    for v, u in izip(original_non_numeric_shared_variables,
                     update_rules[len(additional_input_states):]):
        updates[v] = u
    # Step 5.7 We are done and can return everything back to the user
    return scan_outputs, updates


def one_step_scan(fn, inputs, states_and_outputs_info, parameters,
                  truncate_gradient):
    """
    This function is evaluated if `n_steps` evaluates to either 1 or -1.

    In that case the Scan Op is skipped entirely: the inner function ``fn``
    is applied exactly once to the first slice of every sequence and to the
    initial states, and the results (with a leading broadcast-stripped time
    dimension of size 1) are returned directly.

    :param fn: callable computing one step of the loop; receives sequence
        slices, state slices and parameters (in that order).
    :param inputs: list of sequence variables; only index 0 of each is used.
    :param states_and_outputs_info: list of dicts describing states/outputs
        (keys used here: 'taps', 'initial').
    :param parameters: list of non-sequence arguments passed through to fn.
    :param truncate_gradient: accepted for interface symmetry with the full
        scan path; not used in this function.
    """
    # 1. Grab slices of sequences
    inputs_slices = [input[0] for input in inputs]
    # 2. Grab slices of states
    states_slices = []
    for n, arg_info in enumerate(states_and_outputs_info):
        if arg_info.get('taps', None) == [-1]:
            # Simple state: only the previous value is needed.
            states_slices.append(arg_info['initial'])
        elif arg_info.get('taps', None):
            if numpy.any(numpy.array(arg_info.get('taps', [])) > 0):
                # Make sure we do not have requests for future values of a
                # sequence we can not provide such values
                raise ValueError('Can not use future taps of outputs',
                                 arg_info)
            # go through the taps
            mintap = abs(numpy.min(arg_info['taps']))
            # tap k maps to position k + mintap inside the initial buffer
            states_slices.extend(
                [arg_info['initial'][k + mintap] for k in arg_info['taps']])

    # Re-order args
    args = (inputs_slices + states_slices + parameters)
    cond, states_and_outputs, updates = \
        scan_utils.get_updates_and_outputs(fn(*args))
    # We do not need to use the scan op anymore, so we can just return
    # the outputs and updates we have
    if cond is not None:
        # NOTE(review): a tuple (not a formatted string) is passed to
        # warning() here — the message will render as a tuple repr; verify
        # whether this was intended.
        _logger.warning(('When the number of steps is fixed and equal '
                         'to 1, the provided stopping condition, ',
                         str(cond), ' is ignored'))

    states_and_outputs = [tensor.unbroadcast(
        tensor.shape_padleft(arg), 0) for arg in states_and_outputs]
    if len(states_and_outputs) == 1:
        states_and_outputs = states_and_outputs[0]

    return (states_and_outputs, updates)
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
System-level utilities and helper functions.
"""

import math
import re
import sys
import unicodedata

import six

from barbican.openstack.common.gettextutils import _


# Exponent applied to the unit system's base (1024 or 1000) for each
# recognized unit prefix, e.g. 'Mi' -> base ** 2.
UNIT_PREFIX_EXPONENT = {
    'k': 1,
    'K': 1,
    'Ki': 1,
    'M': 2,
    'Mi': 2,
    'G': 3,
    'Gi': 3,
    'T': 4,
    'Ti': 4,
}
# Unit system name -> (base, regex). The regex captures in order:
# magnitude, optional unit prefix, and the byte/bit unit.
UNIT_SYSTEM_INFO = {
    'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
    'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}

# Lowercased string values accepted as True / False by bool_from_string().
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')

# Characters stripped / collapsed into hyphens by to_slug().
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")


def int_from_bool_as_string(subject):
    """Interpret a string as a boolean and return either 1 or 0.

    Any string value in:

        ('True', 'true', 'On', 'on', '1')

    is interpreted as a boolean True.

    Useful for JSON-decoded stuff and config file parsing
    """
    # NOTE: a conditional expression replaces the legacy ``and 1 or 0``
    # hack; the result is identical for both truth values but the intent
    # is explicit and the pattern is not fragile.
    return 1 if bool_from_string(subject) else 0


def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else returns the value specified by 'default'.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API
    call. Strings yielding False are 'f', 'false', 'off', 'n', 'no', or
    '0'.
    """
    # Coerce non-strings (including None) so .strip()/.lower() are safe.
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = _("Unrecognized value '%(val)s', acceptable values are:"
                " %(acceptable)s") % {'val': subject,
                                      'acceptable': acceptable}
        raise ValueError(msg)
    else:
        return default


def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming text/bytes string using `incoming` if they're not
       already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
                representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be decoded" % type(text))

    # Already unicode: nothing to do.
    if isinstance(text, six.text_type):
        return text

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    try:
        return text.decode(incoming, errors)
    except UnicodeDecodeError:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode('utf-8', errors)


def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """Encodes incoming text/bytes string using `encoding`.

    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
                representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be encoded" % type(text))

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    if isinstance(text, six.text_type):
        return text.encode(encoding, errors)
    elif text and encoding != incoming:
        # Decode text before encoding it with `encoding`
        text = safe_decode(text, incoming, errors)
        return text.encode(encoding, errors)
    else:
        # Already bytes in the requested encoding (or empty).
        return text


def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into a float representation of bytes.

    The units supported for IEC ::

        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB

    The units supported for SI ::

        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB

    Note that the SI unit system does not support capital letter 'K'

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion.
    :param return_int: If True, returns integer representation of text
                       in bytes. (default: decimal)
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.

    """
    try:
        base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
    except KeyError:
        msg = _('Invalid unit system: "%s"') % unit_system
        raise ValueError(msg)
    match = reg_ex.match(text)
    if match:
        magnitude = float(match.group(1))
        unit_prefix = match.group(2)
        # 'b'/'bit' means the magnitude is in bits, not bytes.
        if match.group(3) in ['b', 'bit']:
            magnitude /= 8
    else:
        msg = _('Invalid string format: %s') % text
        raise ValueError(msg)
    if not unit_prefix:
        res = magnitude
    else:
        res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
    if return_int:
        # Round up so a fractional byte count never under-reports.
        return int(math.ceil(res))
    return res


def to_slug(value, incoming=None, errors="strict"):
    """Normalize string.

    Convert to lowercase, remove non-word characters, and convert spaces
    to hyphens.

    Inspired by Django's `slugify` filter.

    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: slugified unicode representation of `value`
    :raises TypeError: If text is not an instance of str
    """
    value = safe_decode(value, incoming, errors)
    # NOTE(aababilov): no need to use safe_(encode|decode) here:
    # encodings are always "ascii", error handling is always "ignore"
    # and types are always known (first: unicode; second: str)
    value = unicodedata.normalize("NFKD", value).encode(
        "ascii", "ignore").decode("ascii")
    value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", value)