repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
openstack/nova
nova/tests/unit/network/test_os_vif_util.py
2
48551
# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from os_vif import objects as osv_objects from os_vif.objects import fields as os_vif_fields from nova import exception from nova.network import model from nova.network import os_vif_util from nova import objects from nova import test class OSVIFUtilTestCase(test.NoDBTestCase): def setUp(self): super(OSVIFUtilTestCase, self).setUp() osv_objects.register_all() # Remove when all os-vif objects include the # ComparableVersionedObject mix-in def assertObjEqual(self, expect, actual): actual.obj_reset_changes(recursive=True) expect.obj_reset_changes(recursive=True) self.assertEqual(expect.obj_to_primitive(), actual.obj_to_primitive()) def test_nova_to_osvif_instance(self): inst = objects.Instance( id="1242", uuid="d5b1090c-9e00-4fa4-9504-4b1494857970", project_id="2f37d7f6-e51a-4a1f-8b6e-b0917ffc8390") info = os_vif_util.nova_to_osvif_instance(inst) expect = osv_objects.instance_info.InstanceInfo( uuid="d5b1090c-9e00-4fa4-9504-4b1494857970", name="instance-000004da", project_id="2f37d7f6-e51a-4a1f-8b6e-b0917ffc8390") self.assertObjEqual(info, expect) def test_nova_to_osvif_instance_minimal(self): inst = objects.Instance( id="1242", uuid="d5b1090c-9e00-4fa4-9504-4b1494857970") actual = os_vif_util.nova_to_osvif_instance(inst) expect = osv_objects.instance_info.InstanceInfo( uuid=inst.uuid, name=inst.name) self.assertObjEqual(expect, actual) def test_nova_to_osvif_ips(self): ips = [ model.FixedIP( 
address="192.168.122.24", floating_ips=[ model.IP(address="192.168.122.100", type="floating"), model.IP(address="192.168.122.101", type="floating"), model.IP(address="192.168.122.102", type="floating"), ], version=4), model.FixedIP( address="2001::beef", version=6), ] actual = os_vif_util._nova_to_osvif_ips(ips) expect = osv_objects.fixed_ip.FixedIPList( objects=[ osv_objects.fixed_ip.FixedIP( address="192.168.122.24", floating_ips=[ "192.168.122.100", "192.168.122.101", "192.168.122.102", ]), osv_objects.fixed_ip.FixedIP( address="2001::beef", floating_ips=[]), ], ) self.assertObjEqual(expect, actual) def test_nova_to_osvif_routes(self): routes = [ model.Route(cidr="192.168.1.0/24", gateway=model.IP( address="192.168.1.254", type='gateway'), interface="eth0"), model.Route(cidr="10.0.0.0/8", gateway=model.IP( address="10.0.0.1", type='gateway')), ] expect = osv_objects.route.RouteList( objects=[ osv_objects.route.Route( cidr="192.168.1.0/24", gateway="192.168.1.254", interface="eth0"), osv_objects.route.Route( cidr="10.0.0.0/8", gateway="10.0.0.1"), ]) actual = os_vif_util._nova_to_osvif_routes(routes) self.assertObjEqual(expect, actual) def test_nova_to_osvif_subnets(self): subnets = [ model.Subnet(cidr="192.168.1.0/24", dns=[ model.IP( address="192.168.1.1", type="dns"), model.IP( address="192.168.1.2", type="dns"), ], gateway=model.IP( address="192.168.1.254", type='gateway'), ips=[ model.FixedIP( address="192.168.1.100", ), model.FixedIP( address="192.168.1.101", ), ], routes=[ model.Route( cidr="10.0.0.1/24", gateway=model.IP( address="192.168.1.254", type="gateway"), interface="eth0"), ]), model.Subnet(dns=[ model.IP( address="192.168.1.1", type="dns"), model.IP( address="192.168.1.2", type="dns"), ], ips=[ model.FixedIP( address="192.168.1.100", ), model.FixedIP( address="192.168.1.101", ), ], routes=[ model.Route( cidr="10.0.0.1/24", gateway=model.IP( address="192.168.1.254", type="gateway"), interface="eth0"), ]), model.Subnet(dns=[ model.IP( 
address="192.168.1.1", type="dns"), model.IP( address="192.168.1.2", type="dns"), ], gateway=model.IP( type='gateway'), ips=[ model.FixedIP( address="192.168.1.100", ), model.FixedIP( address="192.168.1.101", ), ], routes=[ model.Route( cidr="10.0.0.1/24", gateway=model.IP( address="192.168.1.254", type="gateway"), interface="eth0"), ]), ] expect = osv_objects.subnet.SubnetList( objects=[ osv_objects.subnet.Subnet( cidr="192.168.1.0/24", dns=["192.168.1.1", "192.168.1.2"], gateway="192.168.1.254", ips=osv_objects.fixed_ip.FixedIPList( objects=[ osv_objects.fixed_ip.FixedIP( address="192.168.1.100", floating_ips=[]), osv_objects.fixed_ip.FixedIP( address="192.168.1.101", floating_ips=[]), ]), routes=osv_objects.route.RouteList( objects=[ osv_objects.route.Route( cidr="10.0.0.1/24", gateway="192.168.1.254", interface="eth0") ]), ), osv_objects.subnet.Subnet( dns=["192.168.1.1", "192.168.1.2"], ips=osv_objects.fixed_ip.FixedIPList( objects=[ osv_objects.fixed_ip.FixedIP( address="192.168.1.100", floating_ips=[]), osv_objects.fixed_ip.FixedIP( address="192.168.1.101", floating_ips=[]), ]), routes=osv_objects.route.RouteList( objects=[ osv_objects.route.Route( cidr="10.0.0.1/24", gateway="192.168.1.254", interface="eth0") ]), ), osv_objects.subnet.Subnet( dns=["192.168.1.1", "192.168.1.2"], ips=osv_objects.fixed_ip.FixedIPList( objects=[ osv_objects.fixed_ip.FixedIP( address="192.168.1.100", floating_ips=[]), osv_objects.fixed_ip.FixedIP( address="192.168.1.101", floating_ips=[]), ]), routes=osv_objects.route.RouteList( objects=[ osv_objects.route.Route( cidr="10.0.0.1/24", gateway="192.168.1.254", interface="eth0") ]), ), ]) actual = os_vif_util._nova_to_osvif_subnets(subnets) self.assertObjEqual(expect, actual) def test_nova_to_osvif_network(self): network = model.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge="br0", subnets=[ model.Subnet(cidr="192.168.1.0/24", gateway=model.IP( address="192.168.1.254", type='gateway')), ]) expect = 
osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge="br0", bridge_interface=None, subnets=osv_objects.subnet.SubnetList( objects=[ osv_objects.subnet.Subnet( cidr="192.168.1.0/24", dns=[], gateway="192.168.1.254", ips=osv_objects.fixed_ip.FixedIPList( objects=[]), routes=osv_objects.route.RouteList( objects=[]), ) ])) actual = os_vif_util._nova_to_osvif_network(network) self.assertObjEqual(expect, actual) def test_nova_to_osvif_network_extra(self): network = model.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge="br0", multi_host=True, should_create_bridge=True, should_create_vlan=True, bridge_interface="eth0", vlan=1729, subnets=[ model.Subnet(cidr="192.168.1.0/24", gateway=model.IP( address="192.168.1.254", type='gateway')), ]) expect = osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge="br0", multi_host=True, should_provide_bridge=True, should_provide_vlan=True, bridge_interface="eth0", vlan=1729, subnets=osv_objects.subnet.SubnetList( objects=[ osv_objects.subnet.Subnet( cidr="192.168.1.0/24", dns=[], gateway="192.168.1.254", ips=osv_objects.fixed_ip.FixedIPList( objects=[]), routes=osv_objects.route.RouteList( objects=[]), ) ])) actual = os_vif_util._nova_to_osvif_network(network) self.assertObjEqual(expect, actual) def test_nova_to_osvif_network_labeled_no_bridge(self): network = model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[ model.Subnet(cidr="192.168.1.0/24", gateway=model.IP( address="192.168.1.254", type='gateway')), ]) expect = osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", subnets=osv_objects.subnet.SubnetList( objects=[ osv_objects.subnet.Subnet( cidr="192.168.1.0/24", dns=[], gateway="192.168.1.254", ips=osv_objects.fixed_ip.FixedIPList( objects=[]), routes=osv_objects.route.RouteList( objects=[]), ) ])) actual = os_vif_util._nova_to_osvif_network(network) self.assertObjEqual(expect, 
actual) def test_nova_to_osvif_network_labeled_no_vlan(self): network = model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", should_create_vlan=True, subnets=[ model.Subnet(cidr="192.168.1.0/24", gateway=model.IP( address="192.168.1.254", type='gateway')), ]) self.assertRaises(exception.NovaException, os_vif_util._nova_to_osvif_network, network) def test_nova_to_osvif_network_mtu(self): network = model.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge="br0", mtu=550, subnets=[]) osv_obj = os_vif_util._nova_to_osvif_network(network) self.assertEqual(550, osv_obj.mtu) def test_nova_to_osvif_vif_linux_bridge(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_BRIDGE, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), details={ model.VIF_DETAILS_PORT_FILTER: True, } ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFBridge( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", has_traffic_filtering=True, plugin="linux_bridge", preserve_on_delete=False, vif_name="nicdc065497-3c", network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_vif_agilio_ovs_fallthrough(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_AGILIO_OVS, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), details={ model.VIF_DETAILS_PORT_FILTER: True, }, delegate_create=False, ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFOpenVSwitch( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", has_traffic_filtering=True, plugin="ovs", 
port_profile=osv_objects.vif.VIFPortProfileOpenVSwitch( interface_id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", datapath_type=None, create_port=False), preserve_on_delete=False, vif_name="nicdc065497-3c", network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_vif_agilio_ovs_direct(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_AGILIO_OVS, address="22:52:25:62:e2:aa", profile={ "pci_slot": "0000:08:08.5", }, network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), vnic_type=model.VNIC_TYPE_DIRECT, ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFHostDevice( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, has_traffic_filtering=False, address="22:52:25:62:e2:aa", dev_type=osv_objects.fields.VIFHostDeviceDevType.ETHERNET, dev_address="0000:08:08.5", plugin="agilio_ovs", port_profile=osv_objects.vif.VIFPortProfileOVSRepresentor( interface_id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", representor_name="nicdc065497-3c", representor_address="0000:08:08.5", datapath_offload=osv_objects.vif.DatapathOffloadRepresentor( representor_name="nicdc065497-3c", representor_address="0000:08:08.5")), preserve_on_delete=False, vif_name="nicdc065497-3c", network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_vif_agilio_ovs_forwarder(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_AGILIO_OVS, address="22:52:25:62:e2:aa", profile={ "pci_slot": "0000:08:08.5", }, network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), 
vnic_type=model.VNIC_TYPE_VIRTIO_FORWARDER, details={ model.VIF_DETAILS_VHOSTUSER_MODE: 'client', model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True, model.VIF_DETAILS_VHOSTUSER_SOCKET: '/fake/socket', } ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFVHostUser( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", has_traffic_filtering=False, plugin="agilio_ovs", port_profile=osv_objects.vif.VIFPortProfileOVSRepresentor( interface_id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", representor_address="0000:08:08.5", representor_name="nicdc065497-3c", datapath_offload=osv_objects.vif.DatapathOffloadRepresentor( representor_name="nicdc065497-3c", representor_address="0000:08:08.5")), preserve_on_delete=False, vif_name="nicdc065497-3c", path='/fake/socket', mode='client', network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_vif_ovs_plain(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_OVS, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), details={ model.VIF_DETAILS_PORT_FILTER: True, model.VIF_DETAILS_OVS_DATAPATH_TYPE: model.VIF_DETAILS_OVS_DATAPATH_SYSTEM }, delegate_create=True, ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFOpenVSwitch( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", has_traffic_filtering=True, plugin="ovs", port_profile=osv_objects.vif.VIFPortProfileOpenVSwitch( interface_id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", datapath_type=model.VIF_DETAILS_OVS_DATAPATH_SYSTEM, create_port=True), preserve_on_delete=False, vif_name="nicdc065497-3c", network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo 
Net", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_vif_ovs_hybrid(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_OVS, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), details={ model.VIF_DETAILS_PORT_FILTER: False, model.VIF_DETAILS_OVS_HYBRID_PLUG: True, model.VIF_DETAILS_OVS_DATAPATH_TYPE: model.VIF_DETAILS_OVS_DATAPATH_SYSTEM }, ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFBridge( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", has_traffic_filtering=False, plugin="ovs", bridge_name="qbrdc065497-3c", port_profile=osv_objects.vif.VIFPortProfileOpenVSwitch( interface_id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", datapath_type="system"), preserve_on_delete=False, vif_name="nicdc065497-3c", network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_ovs_with_vnic_direct(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_OVS, address="22:52:25:62:e2:aa", vnic_type=model.VNIC_TYPE_DIRECT, network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), profile={'pci_slot': '0000:0a:00.1'} ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFHostDevice( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", dev_address='0000:0a:00.1', dev_type=os_vif_fields.VIFHostDeviceDevType.ETHERNET, plugin="ovs", port_profile=osv_objects.vif.VIFPortProfileOVSRepresentor( interface_id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", representor_name="nicdc065497-3c", representor_address="0000:0a:00.1", 
datapath_offload=osv_objects.vif.DatapathOffloadRepresentor( representor_name="nicdc065497-3c", representor_address="0000:0a:00.1")), has_traffic_filtering=False, preserve_on_delete=False, network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_ovs_with_vnic_vdpa(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_OVS, address="22:52:25:62:e2:aa", vnic_type=model.VNIC_TYPE_VDPA, network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), profile={'pci_slot': '0000:0a:00.1'} ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFHostDevice( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", dev_address='0000:0a:00.1', dev_type=os_vif_fields.VIFHostDeviceDevType.ETHERNET, plugin="ovs", port_profile=osv_objects.vif.VIFPortProfileOVSRepresentor( interface_id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", representor_name="nicdc065497-3c", representor_address="0000:0a:00.1", datapath_offload=osv_objects.vif.DatapathOffloadRepresentor( representor_name="nicdc065497-3c", representor_address="0000:0a:00.1")), has_traffic_filtering=False, preserve_on_delete=False, network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_vhostuser_ovs(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_VHOSTUSER, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), details={ model.VIF_DETAILS_VHOSTUSER_MODE: 'client', model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True, model.VIF_DETAILS_VHOSTUSER_SOCKET: '/fake/socket', 
model.VIF_DETAILS_PORT_FILTER: True, model.VIF_DETAILS_OVS_DATAPATH_TYPE: model.VIF_DETAILS_OVS_DATAPATH_SYSTEM }, ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFVHostUser( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", plugin="ovs", port_profile=osv_objects.vif.VIFPortProfileOpenVSwitch( interface_id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", datapath_type=model.VIF_DETAILS_OVS_DATAPATH_SYSTEM), vif_name="vhudc065497-3c", path='/fake/socket', mode='client', has_traffic_filtering=True, preserve_on_delete=False, network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_vhostuser_ovs_no_socket_path(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_VHOSTUSER, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), details={ model.VIF_DETAILS_VHOSTUSER_MODE: 'client', model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True, model.VIF_DETAILS_PORT_FILTER: True } ) self.assertRaises(exception.VifDetailsMissingVhostuserSockPath, os_vif_util.nova_to_osvif_vif, vif) def test_nova_to_osvif_vhostuser_non_ovs(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, type=model.VIF_TYPE_VHOSTUSER, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), details={ model.VIF_DETAILS_VHOSTUSER_MODE: 'client', model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: False, model.VIF_DETAILS_VHOSTUSER_SOCKET: '/fake/socket' } ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFVHostUser( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", plugin="noop", vif_name="nicdc065497-3c", path='/fake/socket', mode='client', 
has_traffic_filtering=False, preserve_on_delete=False, network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", mtu=None, subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_vhostuser_fp_ovs_hybrid(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_VHOSTUSER, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", mtu="1500", subnets=[]), details={ model.VIF_DETAILS_VHOSTUSER_MODE: 'client', model.VIF_DETAILS_VHOSTUSER_SOCKET: '/fake/socket', model.VIF_DETAILS_VHOSTUSER_FP_PLUG: True, model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True, model.VIF_DETAILS_OVS_HYBRID_PLUG: True, model.VIF_DETAILS_PORT_FILTER: False, model.VIF_DETAILS_OVS_DATAPATH_TYPE: model.VIF_DETAILS_OVS_DATAPATH_SYSTEM }, ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFVHostUser( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", plugin="vhostuser_fp", port_profile=osv_objects.vif.VIFPortProfileFPOpenVSwitch( interface_id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", bridge_name="qbrdc065497-3c", hybrid_plug=True, datapath_type=model.VIF_DETAILS_OVS_DATAPATH_SYSTEM), vif_name="nicdc065497-3c", path='/fake/socket', mode='client', has_traffic_filtering=False, preserve_on_delete=False, network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", mtu="1500", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_vhostuser_fp_ovs_plain(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_VHOSTUSER, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", mtu="1500", bridge="br-int", subnets=[]), details={ model.VIF_DETAILS_VHOSTUSER_MODE: 
'client', model.VIF_DETAILS_VHOSTUSER_SOCKET: '/fake/socket', model.VIF_DETAILS_VHOSTUSER_FP_PLUG: True, model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True, model.VIF_DETAILS_OVS_HYBRID_PLUG: False, model.VIF_DETAILS_PORT_FILTER: True, model.VIF_DETAILS_OVS_DATAPATH_TYPE: model.VIF_DETAILS_OVS_DATAPATH_SYSTEM }, ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFVHostUser( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", plugin="vhostuser_fp", port_profile=osv_objects.vif.VIFPortProfileFPOpenVSwitch( interface_id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", bridge_name="br-int", hybrid_plug=False, datapath_type=model.VIF_DETAILS_OVS_DATAPATH_SYSTEM), vif_name="nicdc065497-3c", path='/fake/socket', mode='client', has_traffic_filtering=True, preserve_on_delete=False, network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", mtu="1500", bridge="br-int", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_vhostuser_fp_lb(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_VHOSTUSER, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", mtu="1500", bridge="brq12345", subnets=[]), details={ model.VIF_DETAILS_VHOSTUSER_MODE: 'client', model.VIF_DETAILS_VHOSTUSER_SOCKET: '/fake/socket', model.VIF_DETAILS_VHOSTUSER_FP_PLUG: True, model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: False, } ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFVHostUser( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", plugin="vhostuser_fp", port_profile=osv_objects.vif.VIFPortProfileFPBridge( bridge_name="brq12345"), vif_name="nicdc065497-3c", path='/fake/socket', mode='client', has_traffic_filtering=False, preserve_on_delete=False, network=osv_objects.network.Network( 
id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", mtu="1500", bridge="brq12345", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_vhostuser_fp_no_socket_path(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_VHOSTUSER, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), details={ model.VIF_DETAILS_VHOSTUSER_MODE: 'client', model.VIF_DETAILS_VHOSTUSER_FP_PLUG: True, model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: False, model.VIF_DETAILS_PORT_FILTER: True, } ) self.assertRaises(exception.VifDetailsMissingVhostuserSockPath, os_vif_util.nova_to_osvif_vif, vif) def test_nova_to_osvif_vif_ivs_plain(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_IVS, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), details={ model.VIF_DETAILS_PORT_FILTER: True, } ) actual = os_vif_util.nova_to_osvif_vif(vif) # expected vif_name is nic + vif_id, with total length 14 chars expected_vif_name = 'nicdc065497-3c' self.assertIsInstance(actual, osv_objects.vif.VIFGeneric) self.assertEqual(expected_vif_name, actual.vif_name) def test_nova_to_osvif_vif_ivs_bridged(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_IVS, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), details={ model.VIF_DETAILS_PORT_FILTER: True, model.VIF_DETAILS_OVS_HYBRID_PLUG: True, } ) actual = os_vif_util.nova_to_osvif_vif(vif) # expected vif_name is nic + vif_id, with total length 14 chars expected_vif_name = 'nicdc065497-3c' self.assertIsInstance(actual, osv_objects.vif.VIFBridge) self.assertEqual(expected_vif_name, actual.vif_name) def test_nova_to_osvif_vif_unknown(self): vif = model.VIF( 
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type="wibble", address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), ) ex = self.assertRaises(exception.NovaException, os_vif_util.nova_to_osvif_vif, vif) self.assertIn('Unsupported VIF type wibble', str(ex)) def test_nova_to_osvif_vif_binding_failed(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type="binding_failed", address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]),) self.assertIsNone(os_vif_util.nova_to_osvif_vif(vif)) def test_nova_to_osvif_vif_unbound(self): vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type="unbound", address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]),) self.assertIsNone(os_vif_util.nova_to_osvif_vif(vif)) def test_nova_to_osvif_contrail_vrouter(self): """Test for the Contrail / Tungsten Fabric DPDK datapath.""" vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_VHOSTUSER, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), details={ model.VIF_DETAILS_VHOSTUSER_MODE: 'client', model.VIF_DETAILS_VHOSTUSER_VROUTER_PLUG: True, model.VIF_DETAILS_VHOSTUSER_SOCKET: '/fake/socket', } ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFVHostUser( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", plugin="contrail_vrouter", vif_name="nicdc065497-3c", path='/fake/socket', mode='client', has_traffic_filtering=False, preserve_on_delete=False, network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def 
test_nova_to_osvif_contrail_vrouter_no_socket_path(self): """Test for the Contrail / Tungsten Fabric DPDK datapath.""" vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_VHOSTUSER, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), details={ model.VIF_DETAILS_VHOSTUSER_MODE: 'client', model.VIF_DETAILS_VHOSTUSER_VROUTER_PLUG: True, } ) self.assertRaises(exception.VifDetailsMissingVhostuserSockPath, os_vif_util.nova_to_osvif_vif, vif) def test_nova_to_osvif_vrouter(self): """Test for the Contrail / Tungsten Fabric kernel datapath.""" vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_VROUTER, address="22:52:25:62:e2:aa", network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFGeneric( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", plugin="vrouter", vif_name="nicdc065497-3c", has_traffic_filtering=False, preserve_on_delete=False, network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_vrouter_direct(self): """Test for Contrail / Tungsten Fabric direct offloaded datapath.""" vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_VROUTER, address="22:52:25:62:e2:aa", profile={ "pci_slot": "0000:08:08.5", }, network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), vnic_type=model.VNIC_TYPE_DIRECT, ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFHostDevice( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, has_traffic_filtering=False, address="22:52:25:62:e2:aa", 
dev_type=osv_objects.fields.VIFHostDeviceDevType.ETHERNET, dev_address="0000:08:08.5", plugin="vrouter", port_profile=osv_objects.vif.VIFPortProfileBase( datapath_offload=osv_objects.vif.DatapathOffloadRepresentor( representor_name="nicdc065497-3c", representor_address="0000:08:08.5") ), preserve_on_delete=False, vif_name="nicdc065497-3c", network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual) def test_nova_to_osvif_vrouter_forwarder(self): """Test for Contrail / Tungsten Fabric indirect offloaded datapath.""" vif = model.VIF( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", type=model.VIF_TYPE_VROUTER, address="22:52:25:62:e2:aa", profile={ "pci_slot": "0000:08:08.5", }, network=model.Network( id="b82c1929-051e-481d-8110-4669916c7915", label="Demo Net", subnets=[]), details={ model.VIF_DETAILS_VHOSTUSER_MODE: 'client', model.VIF_DETAILS_VHOSTUSER_SOCKET: '/fake/socket', }, vnic_type=model.VNIC_TYPE_VIRTIO_FORWARDER, ) actual = os_vif_util.nova_to_osvif_vif(vif) expect = osv_objects.vif.VIFVHostUser( id="dc065497-3c8d-4f44-8fb4-e1d33c16a536", active=False, address="22:52:25:62:e2:aa", plugin="vrouter", vif_name="nicdc065497-3c", path='/fake/socket', mode='client', has_traffic_filtering=False, port_profile=osv_objects.vif.VIFPortProfileBase( datapath_offload=osv_objects.vif.DatapathOffloadRepresentor( representor_address="0000:08:08.5", representor_name="nicdc065497-3c") ), preserve_on_delete=False, network=osv_objects.network.Network( id="b82c1929-051e-481d-8110-4669916c7915", bridge_interface=None, label="Demo Net", subnets=osv_objects.subnet.SubnetList( objects=[]))) self.assertObjEqual(expect, actual)
apache-2.0
andrewsomething/python-digitalocean
digitalocean/tests/test_load_balancer.py
2
12023
import json import unittest import responses import digitalocean from .BaseTest import BaseTest class TestLoadBalancer(BaseTest): def setUp(self): super(TestLoadBalancer, self).setUp() self.lb_id = '4de7ac8b-495b-4884-9a69-1050c6793cd6' self.lb = digitalocean.LoadBalancer(id=self.lb_id, token=self.token) @responses.activate def test_load(self): data = self.load_from_file('loadbalancer/single.json') url = self.base_url + 'load_balancers/' + self.lb_id responses.add(responses.GET, url, body=data, status=200, content_type='application/json') self.lb.load() rules = self.lb.forwarding_rules self.assert_get_url_equal(responses.calls[0].request.url, url) self.assertEqual(self.lb.id, self.lb_id) self.assertEqual(self.lb.region['slug'], 'nyc3') self.assertEqual(self.lb.algorithm, 'round_robin') self.assertEqual(self.lb.ip, '104.131.186.241') self.assertEqual(self.lb.name, 'example-lb-01') self.assertEqual(len(rules), 2) self.assertEqual(rules[0].entry_protocol, 'http') self.assertEqual(rules[0].entry_port, 80) self.assertEqual(rules[0].target_protocol, 'http') self.assertEqual(rules[0].target_port, 80) self.assertEqual(rules[0].tls_passthrough, False) self.assertEqual(self.lb.health_check.protocol, 'http') self.assertEqual(self.lb.health_check.port, 80) self.assertEqual(self.lb.sticky_sessions.type, 'none') self.assertEqual(self.lb.droplet_ids, [3164444, 3164445]) @responses.activate def test_create_ids(self): data = self.load_from_file('loadbalancer/single.json') url = self.base_url + "load_balancers/" responses.add(responses.POST, url, body=data, status=201, content_type='application/json') rule1 = digitalocean.ForwardingRule(entry_port=80, entry_protocol='http', target_port=80, target_protocol='http') rule2 = digitalocean.ForwardingRule(entry_port=443, entry_protocol='https', target_port=443, target_protocol='https', tls_passthrough=True) check = digitalocean.HealthCheck() sticky = digitalocean.StickySesions(type='none') lb = 
digitalocean.LoadBalancer(name='example-lb-01', region='nyc3', algorithm='round_robin', forwarding_rules=[rule1, rule2], health_check=check, sticky_sessions=sticky, redirect_http_to_https=False, droplet_ids=[3164444, 3164445], token=self.token).create() resp_rules = lb.forwarding_rules self.assert_url_query_equal(responses.calls[0].request.url, url) self.assertEqual(lb.id, self.lb_id) self.assertEqual(lb.algorithm, 'round_robin') self.assertEqual(lb.ip, '104.131.186.241') self.assertEqual(lb.name, 'example-lb-01') self.assertEqual(len(resp_rules), 2) self.assertEqual(resp_rules[0].entry_protocol, 'http') self.assertEqual(resp_rules[0].entry_port, 80) self.assertEqual(resp_rules[0].target_protocol, 'http') self.assertEqual(resp_rules[0].target_port, 80) self.assertEqual(resp_rules[0].tls_passthrough, False) self.assertEqual(lb.health_check.protocol, 'http') self.assertEqual(lb.health_check.port, 80) self.assertEqual(lb.sticky_sessions.type, 'none') self.assertEqual(lb.droplet_ids, [3164444, 3164445]) @responses.activate def test_create_tag(self): data = self.load_from_file('loadbalancer/single_tag.json') url = self.base_url + "load_balancers/" responses.add(responses.POST, url, body=data, status=201, content_type='application/json') rule1 = digitalocean.ForwardingRule(entry_port=80, entry_protocol='http', target_port=80, target_protocol='http') rule2 = digitalocean.ForwardingRule(entry_port=443, entry_protocol='https', target_port=443, target_protocol='https', tls_passthrough=True) check = digitalocean.HealthCheck() sticky = digitalocean.StickySesions(type='none') lb = digitalocean.LoadBalancer(name='example-lb-01', region='nyc3', algorithm='round_robin', forwarding_rules=[rule1, rule2], health_check=check, sticky_sessions=sticky, redirect_http_to_https=False, tag='web', token=self.token).create() resp_rules = lb.forwarding_rules self.assertEqual(responses.calls[0].request.url, self.base_url + 'load_balancers/') self.assertEqual(lb.id, 
'4de2ac7b-495b-4884-9e69-1050d6793cd4') self.assertEqual(lb.algorithm, 'round_robin') self.assertEqual(lb.ip, '104.131.186.248') self.assertEqual(lb.name, 'example-lb-01') self.assertEqual(len(resp_rules), 2) self.assertEqual(resp_rules[0].entry_protocol, 'http') self.assertEqual(resp_rules[0].entry_port, 80) self.assertEqual(resp_rules[0].target_protocol, 'http') self.assertEqual(resp_rules[0].target_port, 80) self.assertEqual(resp_rules[0].tls_passthrough, False) self.assertEqual(lb.health_check.protocol, 'http') self.assertEqual(lb.health_check.port, 80) self.assertEqual(lb.sticky_sessions.type, 'none') self.assertEqual(lb.tag, 'web') self.assertEqual(lb.droplet_ids, [3164444, 3164445]) @responses.activate def test_create_exception(self): data = self.load_from_file('loadbalancer/single_tag.json') url = self.base_url + "load_balancers/" responses.add(responses.POST, url, body=data, status=201, content_type='application/json') rule = digitalocean.ForwardingRule(entry_port=80, entry_protocol='http', target_port=80, target_protocol='http') check = digitalocean.HealthCheck() sticky = digitalocean.StickySesions(type='none') lb = digitalocean.LoadBalancer(name='example-lb-01', region='nyc3', algorithm='round_robin', forwarding_rules=[rule], health_check=check, sticky_sessions=sticky, redirect_http_to_https=False, tag='web', droplet_ids=[123456, 789456], token=self.token) with self.assertRaises(ValueError) as context: lb.create() self.assertEqual('droplet_ids and tag are mutually exclusive args', str(context.exception)) @responses.activate def test_destroy(self): url = '{0}load_balancers/{1}/'.format(self.base_url, self.lb_id) responses.add(responses.DELETE, url, status=204, content_type='application/json') self.lb.destroy() self.assertEqual(responses.calls[0].request.url, url) @responses.activate def test_add_droplets(self): url = '{0}load_balancers/{1}/droplets/'.format(self.base_url, self.lb_id) responses.add(responses.POST, url, status=204, 
content_type='application/json') self.lb.add_droplets([12345, 78945]) body = '{"droplet_ids": [12345, 78945]}' self.assertEqual(responses.calls[0].request.url, url) self.assertEqual(responses.calls[0].request.body, body) @responses.activate def test_remove_droplets(self): url = '{0}load_balancers/{1}/droplets/'.format(self.base_url, self.lb_id) responses.add(responses.DELETE, url, status=204, content_type='application/json') self.lb.remove_droplets([12345, 78945]) body = '{"droplet_ids": [12345, 78945]}' self.assertEqual(responses.calls[0].request.url, url) self.assertEqual(responses.calls[0].request.body, body) @responses.activate def test_add_forwarding_rules(self): url = '{0}load_balancers/{1}/forwarding_rules/'.format(self.base_url, self.lb_id) responses.add(responses.POST, url, status=204, content_type='application/json') rule = digitalocean.ForwardingRule(entry_port=3306, entry_protocol='tcp', target_port=3306, target_protocol='tcp') self.lb.add_forwarding_rules([rule]) req_body = json.loads("""{ "forwarding_rules": [ { "entry_protocol": "tcp", "entry_port": 3306, "target_protocol": "tcp", "target_port": 3306, "certificate_id": "", "tls_passthrough": false } ] }""") body = json.loads(responses.calls[0].request.body) self.assertEqual(responses.calls[0].request.url, url) self.assertEqual(sorted(body.items()), sorted(req_body.items())) @responses.activate def test_remove_forwarding_rules(self): url = '{0}load_balancers/{1}/forwarding_rules/'.format(self.base_url, self.lb_id) responses.add(responses.DELETE, url, status=204, content_type='application/json') rule = digitalocean.ForwardingRule(entry_port=3306, entry_protocol='tcp', target_port=3306, target_protocol='tcp') self.lb.remove_forwarding_rules([rule]) req_body = json.loads("""{ "forwarding_rules": [ { "entry_protocol": "tcp", "entry_port": 3306, "target_protocol": "tcp", "target_port": 3306, "certificate_id": "", "tls_passthrough": false } ] }""") body = json.loads(responses.calls[0].request.body) 
self.assertEqual(responses.calls[0].request.url, url) self.assertEqual(sorted(body.items()), sorted(req_body.items())) if __name__ == '__main__': unittest.main()
lgpl-3.0
ketjow4/NOV
Lib/site-packages/numpy/distutils/command/build_py.py
89
1125
from distutils.command.build_py import build_py as old_build_py from numpy.distutils.misc_util import is_string class build_py(old_build_py): def run(self): build_src = self.get_finalized_command('build_src') if build_src.py_modules_dict and self.packages is None: self.packages = build_src.py_modules_dict.keys () old_build_py.run(self) def find_package_modules(self, package, package_dir): modules = old_build_py.find_package_modules(self, package, package_dir) # Find build_src generated *.py files. build_src = self.get_finalized_command('build_src') modules += build_src.py_modules_dict.get(package,[]) return modules def find_modules(self): old_py_modules = self.py_modules[:] new_py_modules = filter(is_string, self.py_modules) self.py_modules[:] = new_py_modules modules = old_build_py.find_modules(self) self.py_modules[:] = old_py_modules return modules # XXX: Fix find_source_files for item in py_modules such that item is 3-tuple # and item[2] is source file.
gpl-3.0
marcsans/cnn-physics-perception
phy/lib/python2.7/site-packages/sklearn/linear_model/perceptron.py
39
3863
# Author: Mathieu Blondel # License: BSD 3 clause from .stochastic_gradient import BaseSGDClassifier from ..feature_selection.from_model import _LearntSelectorMixin class Perceptron(BaseSGDClassifier, _LearntSelectorMixin): """Perceptron Read more in the :ref:`User Guide <perceptron>`. Parameters ---------- penalty : None, 'l2' or 'l1' or 'elasticnet' The penalty (aka regularization term) to be used. Defaults to None. alpha : float Constant that multiplies the regularization term if regularization is used. Defaults to 0.0001 fit_intercept : bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. n_iter : int, optional The number of passes over the training data (aka epochs). Defaults to 5. shuffle : bool, optional, default True Whether or not the training data should be shuffled after each epoch. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. verbose : integer, optional The verbosity level n_jobs : integer, optional The number of CPUs to use to do the OVA (One Versus All, for multi-class problems) computation. -1 means 'all CPUs'. Defaults to 1. eta0 : double Constant by which the updates are multiplied. Defaults to 1. class_weight : dict, {class_label: weight} or "balanced" or None, optional Preset for the class_weight fit parameter. Weights associated with classes. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. Attributes ---------- coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\ n_features] Weights assigned to the features. 
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes] Constants in decision function. Notes ----- `Perceptron` and `SGDClassifier` share the same underlying implementation. In fact, `Perceptron()` is equivalent to `SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant", penalty=None)`. See also -------- SGDClassifier References ---------- https://en.wikipedia.org/wiki/Perceptron and references therein. """ def __init__(self, penalty=None, alpha=0.0001, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, eta0=1.0, n_jobs=1, random_state=0, class_weight=None, warm_start=False): super(Perceptron, self).__init__(loss="perceptron", penalty=penalty, alpha=alpha, l1_ratio=0, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, random_state=random_state, learning_rate="constant", eta0=eta0, power_t=0.5, warm_start=warm_start, class_weight=class_weight, n_jobs=n_jobs)
mit
Arcanemagus/SickRage
lib/requests/adapters.py
69
20836
# -*- coding: utf-8 -*- """ requests.adapters ~~~~~~~~~~~~~~~~~ This module contains the transport adapters that Requests uses to define and maintain connections. """ import os.path import socket from urllib3.poolmanager import PoolManager, proxy_from_url from urllib3.response import HTTPResponse from urllib3.util import Timeout as TimeoutSauce from urllib3.util.retry import Retry from urllib3.exceptions import ClosedPoolError from urllib3.exceptions import ConnectTimeoutError from urllib3.exceptions import HTTPError as _HTTPError from urllib3.exceptions import MaxRetryError from urllib3.exceptions import NewConnectionError from urllib3.exceptions import ProxyError as _ProxyError from urllib3.exceptions import ProtocolError from urllib3.exceptions import ReadTimeoutError from urllib3.exceptions import SSLError as _SSLError from urllib3.exceptions import ResponseError from .models import Response from .compat import urlparse, basestring from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers, prepend_scheme_if_needed, get_auth_from_url, urldefragauth, select_proxy) from .structures import CaseInsensitiveDict from .cookies import extract_cookies_to_jar from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError, ProxyError, RetryError, InvalidSchema) from .auth import _basic_auth_str try: from urllib3.contrib.socks import SOCKSProxyManager except ImportError: def SOCKSProxyManager(*args, **kwargs): raise InvalidSchema("Missing dependencies for SOCKS support.") DEFAULT_POOLBLOCK = False DEFAULT_POOLSIZE = 10 DEFAULT_RETRIES = 0 DEFAULT_POOL_TIMEOUT = None class BaseAdapter(object): """The Base Transport Adapter""" def __init__(self): super(BaseAdapter, self).__init__() def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. 
:param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. """ raise NotImplementedError def close(self): """Cleans up adapter specific items.""" raise NotImplementedError class HTTPAdapter(BaseAdapter): """The built-in HTTP Adapter for urllib3. Provides a general-case interface for Requests sessions to contact HTTP and HTTPS urls by implementing the Transport Adapter interface. This class will usually be created by the :class:`Session <Session>` class under the covers. :param pool_connections: The number of urllib3 connection pools to cache. :param pool_maxsize: The maximum number of connections to save in the pool. :param max_retries: The maximum number of retries each connection should attempt. Note, this applies only to failed DNS lookups, socket connections and connection timeouts, never to requests where data has made it to the server. By default, Requests does not retry failed connections. If you need granular control over the conditions under which we retry a request, import urllib3's ``Retry`` class and pass that instead. :param pool_block: Whether the connection pool should block for connections. 
Usage:: >>> import requests >>> s = requests.Session() >>> a = requests.adapters.HTTPAdapter(max_retries=3) >>> s.mount('http://', a) """ __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize', '_pool_block'] def __init__(self, pool_connections=DEFAULT_POOLSIZE, pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, pool_block=DEFAULT_POOLBLOCK): if max_retries == DEFAULT_RETRIES: self.max_retries = Retry(0, read=False) else: self.max_retries = Retry.from_int(max_retries) self.config = {} self.proxy_manager = {} super(HTTPAdapter, self).__init__() self._pool_connections = pool_connections self._pool_maxsize = pool_maxsize self._pool_block = pool_block self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) def __getstate__(self): return dict((attr, getattr(self, attr, None)) for attr in self.__attrs__) def __setstate__(self, state): # Can't handle by adding 'proxy_manager' to self.__attrs__ because # self.poolmanager uses a lambda function, which isn't pickleable. self.proxy_manager = {} self.config = {} for attr, value in state.items(): setattr(self, attr, value) self.init_poolmanager(self._pool_connections, self._pool_maxsize, block=self._pool_block) def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs): """Initializes a urllib3 PoolManager. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param connections: The number of urllib3 connection pools to cache. :param maxsize: The maximum number of connections to save in the pool. :param block: Block when no free connections are available. :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. 
""" # save these values for pickling self._pool_connections = connections self._pool_maxsize = maxsize self._pool_block = block self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, block=block, strict=True, **pool_kwargs) def proxy_manager_for(self, proxy, **proxy_kwargs): """Return urllib3 ProxyManager for the given proxy. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param proxy: The proxy to return a urllib3 ProxyManager for. :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. :returns: ProxyManager :rtype: urllib3.ProxyManager """ if proxy in self.proxy_manager: manager = self.proxy_manager[proxy] elif proxy.lower().startswith('socks'): username, password = get_auth_from_url(proxy) manager = self.proxy_manager[proxy] = SOCKSProxyManager( proxy, username=username, password=password, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, **proxy_kwargs ) else: proxy_headers = self.proxy_headers(proxy) manager = self.proxy_manager[proxy] = proxy_from_url( proxy, proxy_headers=proxy_headers, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, **proxy_kwargs) return manager def cert_verify(self, conn, url, verify, cert): """Verify a SSL certificate. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param conn: The urllib3 connection object associated with the cert. :param url: The requested URL. :param verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: The SSL certificate to verify. """ if url.lower().startswith('https') and verify: cert_loc = None # Allow self-specified cert location. 
if verify is not True: cert_loc = verify if not cert_loc: cert_loc = DEFAULT_CA_BUNDLE_PATH if not cert_loc or not os.path.exists(cert_loc): raise IOError("Could not find a suitable TLS CA certificate bundle, " "invalid path: {0}".format(cert_loc)) conn.cert_reqs = 'CERT_REQUIRED' if not os.path.isdir(cert_loc): conn.ca_certs = cert_loc else: conn.ca_cert_dir = cert_loc else: conn.cert_reqs = 'CERT_NONE' conn.ca_certs = None conn.ca_cert_dir = None if cert: if not isinstance(cert, basestring): conn.cert_file = cert[0] conn.key_file = cert[1] else: conn.cert_file = cert conn.key_file = None if conn.cert_file and not os.path.exists(conn.cert_file): raise IOError("Could not find the TLS certificate file, " "invalid path: {0}".format(conn.cert_file)) if conn.key_file and not os.path.exists(conn.key_file): raise IOError("Could not find the TLS key file, " "invalid path: {0}".format(conn.key_file)) def build_response(self, req, resp): """Builds a :class:`Response <requests.Response>` object from a urllib3 response. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>` :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response. :param resp: The urllib3 response object. :rtype: requests.Response """ response = Response() # Fallback to None if there's no status_code, for whatever reason. response.status_code = getattr(resp, 'status', None) # Make headers case-insensitive. response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {})) # Set encoding. response.encoding = get_encoding_from_headers(response.headers) response.raw = resp response.reason = response.raw.reason if isinstance(req.url, bytes): response.url = req.url.decode('utf-8') else: response.url = req.url # Add new cookies from the server. extract_cookies_to_jar(response.cookies, req, resp) # Give the Response some context. 
response.request = req response.connection = self return response def get_connection(self, url, proxies=None): """Returns a urllib3 connection for the given URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param url: The URL to connect to. :param proxies: (optional) A Requests-style dictionary of proxies used on this request. :rtype: urllib3.ConnectionPool """ proxy = select_proxy(url, proxies) if proxy: proxy = prepend_scheme_if_needed(proxy, 'http') proxy_manager = self.proxy_manager_for(proxy) conn = proxy_manager.connection_from_url(url) else: # Only scheme should be lower case parsed = urlparse(url) url = parsed.geturl() conn = self.poolmanager.connection_from_url(url) return conn def close(self): """Disposes of any internal state. Currently, this closes the PoolManager and any active ProxyManager, which closes any pooled connections. """ self.poolmanager.clear() for proxy in self.proxy_manager.values(): proxy.clear() def request_url(self, request, proxies): """Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. 
:rtype: str """ proxy = select_proxy(request.url, proxies) scheme = urlparse(request.url).scheme is_proxied_http_request = (proxy and scheme != 'https') using_socks_proxy = False if proxy: proxy_scheme = urlparse(proxy).scheme.lower() using_socks_proxy = proxy_scheme.startswith('socks') url = request.path_url if is_proxied_http_request and not using_socks_proxy: url = urldefragauth(request.url) return url def add_headers(self, request, **kwargs): """Add any headers needed by the connection. As of v2.0 this does nothing by default, but is left for overriding by users that subclass the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to. :param kwargs: The keyword arguments from the call to send(). """ pass def proxy_headers(self, proxy): """Returns a dictionary of the headers to add to any request sent through a proxy. This works with urllib3 magic to ensure that they are correctly sent to the proxy, rather than in a tunnelled request if CONNECT is being used. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param proxies: The url of the proxy being used for this request. :rtype: dict """ headers = {} username, password = get_auth_from_url(proxy) if username: headers['Proxy-Authorization'] = _basic_auth_str(username, password) return headers def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param stream: (optional) Whether to stream the request content. 
:param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple or urllib3 Timeout object :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. :rtype: requests.Response """ conn = self.get_connection(request.url, proxies) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) self.add_headers(request) chunked = not (request.body is None or 'Content-Length' in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) except ValueError as e: # this may raise a string formatting error. err = ("Invalid timeout {0}. Pass a (connect, read) " "timeout tuple, or a single float to set " "both timeouts to the same value".format(timeout)) raise ValueError(err) elif isinstance(timeout, TimeoutSauce): pass else: timeout = TimeoutSauce(connect=timeout, read=timeout) try: if not chunked: resp = conn.urlopen( method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout ) # Send the request. 
else: if hasattr(conn, 'proxy_pool'): conn = conn.proxy_pool low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT) try: low_conn.putrequest(request.method, url, skip_accept_encoding=True) for header, value in request.headers.items(): low_conn.putheader(header, value) low_conn.endheaders() for i in request.body: low_conn.send(hex(len(i))[2:].encode('utf-8')) low_conn.send(b'\r\n') low_conn.send(i) low_conn.send(b'\r\n') low_conn.send(b'0\r\n\r\n') # Receive the response from the server try: # For Python 2.7+ versions, use buffering of HTTP # responses r = low_conn.getresponse(buffering=True) except TypeError: # For compatibility with Python 2.6 versions and back r = low_conn.getresponse() resp = HTTPResponse.from_httplib( r, pool=conn, connection=low_conn, preload_content=False, decode_content=False ) except: # If we hit any problems here, clean up the connection. # Then, reraise so that we can handle the actual exception. low_conn.close() raise except (ProtocolError, socket.error) as err: raise ConnectionError(err, request=request) except MaxRetryError as e: if isinstance(e.reason, ConnectTimeoutError): # TODO: Remove this in 3.0.0: see #2811 if not isinstance(e.reason, NewConnectionError): raise ConnectTimeout(e, request=request) if isinstance(e.reason, ResponseError): raise RetryError(e, request=request) if isinstance(e.reason, _ProxyError): raise ProxyError(e, request=request) if isinstance(e.reason, _SSLError): # This branch is for urllib3 v1.22 and later. raise SSLError(e, request=request) raise ConnectionError(e, request=request) except ClosedPoolError as e: raise ConnectionError(e, request=request) except _ProxyError as e: raise ProxyError(e) except (_SSLError, _HTTPError) as e: if isinstance(e, _SSLError): # This branch is for urllib3 versions earlier than v1.22 raise SSLError(e, request=request) elif isinstance(e, ReadTimeoutError): raise ReadTimeout(e, request=request) else: raise return self.build_response(request, resp)
gpl-3.0
Bloomie/murano-agent
muranoagent/common/messaging/mqclient.py
1
3280
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import anyjson import ssl as ssl_module from eventlet import patcher kombu = patcher.import_patched('kombu') from subscription import Subscription class MqClient(object): def __init__(self, login, password, host, port, virtual_host, ssl=False, ca_certs=None): ssl_params = None if ssl is True: ssl_params = { 'ca_certs': ca_certs, 'cert_reqs': ssl_module.CERT_REQUIRED } self._connection = kombu.Connection( 'amqp://{0}:{1}@{2}:{3}/{4}'.format( login, password, host, port, virtual_host ), ssl=ssl_params ) self._channel = None self._connected = False def __enter__(self): self.connect() return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() return False def connect(self): self._connection.connect() self._channel = self._connection.channel() self._connected = True def close(self): self._connection.close() self._connected = False def declare(self, queue, exchange='', enable_ha=False, ttl=0): if not self._connected: raise RuntimeError('Not connected to RabbitMQ') queue_arguments = {} if enable_ha is True: # To use mirrored queues feature in RabbitMQ 2.x # we need to declare this policy on the queue itself. # # Warning: this option has no effect on RabbitMQ 3.X, # to enable mirrored queues feature in RabbitMQ 3.X, please # configure RabbitMQ. 
queue_arguments['x-ha-policy'] = 'all' if ttl > 0: queue_arguments['x-expires'] = ttl exchange = kombu.Exchange(exchange, type='direct', durable=True) queue = kombu.Queue(queue, exchange, queue, durable=True, queue_arguments=queue_arguments) bound_queue = queue(self._connection) bound_queue.declare() def send(self, message, key, exchange=''): if not self._connected: raise RuntimeError('Not connected to RabbitMQ') producer = kombu.Producer(self._connection) producer.publish( exchange=str(exchange), routing_key=str(key), body=anyjson.dumps(message.body), message_id=str(message.id) ) def open(self, queue, prefetch_count=1): if not self._connected: raise RuntimeError('Not connected to RabbitMQ') return Subscription(self._connection, queue, prefetch_count)
apache-2.0
HyacinthBathan/FinalProject
tailbone/geoip/__init__.py
34
1044
# Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from tailbone import as_json from tailbone import BaseHandler from tailbone import DEBUG import webapp2 class GeoIPHandler(BaseHandler): @as_json def get(self): resp = {} for x in ["Country", "Region", "City", "CityLatLong"]: k = "X-AppEngine-" + x resp[x] = self.request.headers.get(k) resp["IP"] = self.request.remote_addr return resp app = webapp2.WSGIApplication([ (r".*", GeoIPHandler), ], debug=DEBUG)
apache-2.0
hsaputra/tensorflow
tensorflow/python/kernel_tests/ctc_decoder_ops_test.py
103
8844
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ctc_ops.ctc_loss_op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import numpy as np from six.moves import zip_longest from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import ctc_ops from tensorflow.python.platform import test def grouper(iterable, n, fillvalue=None): """Collect data into fixed-length chunks or blocks.""" # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx args = [iter(iterable)] * n return zip_longest(fillvalue=fillvalue, *args) def flatten(list_of_lists): """Flatten one level of nesting.""" return itertools.chain.from_iterable(list_of_lists) class CTCGreedyDecoderTest(test.TestCase): def _testCTCDecoder(self, decoder, inputs, seq_lens, log_prob_truth, decode_truth, expected_err_re=None, **decoder_args): inputs_t = [ops.convert_to_tensor(x) for x in inputs] # convert inputs_t into a [max_time x batch_size x depth] tensor # from a len time python list of [batch_size x depth] tensors inputs_t = array_ops.stack(inputs_t) with self.test_session(use_gpu=False) as sess: decoded_list, log_probability = decoder( inputs_t, sequence_length=seq_lens, **decoder_args) 
decoded_unwrapped = list( flatten([(st.indices, st.values, st.dense_shape) for st in decoded_list])) if expected_err_re is None: outputs = sess.run(decoded_unwrapped + [log_probability]) # Group outputs into (ix, vals, shape) tuples output_sparse_tensors = list(grouper(outputs[:-1], 3)) output_log_probability = outputs[-1] # Check the number of decoded outputs (top_paths) match self.assertEqual(len(output_sparse_tensors), len(decode_truth)) # For each SparseTensor tuple, compare (ix, vals, shape) for out_st, truth_st, tf_st in zip(output_sparse_tensors, decode_truth, decoded_list): self.assertAllEqual(out_st[0], truth_st[0]) # ix self.assertAllEqual(out_st[1], truth_st[1]) # vals self.assertAllEqual(out_st[2], truth_st[2]) # shape # Compare the shapes of the components with the truth. The # `None` elements are not known statically. self.assertEqual([None, truth_st[0].shape[1]], tf_st.indices.get_shape().as_list()) self.assertEqual([None], tf_st.values.get_shape().as_list()) self.assertShapeEqual(truth_st[2], tf_st.dense_shape) # Make sure decoded probabilities match self.assertAllClose(output_log_probability, log_prob_truth, atol=1e-6) else: with self.assertRaisesOpError(expected_err_re): sess.run(decoded_unwrapped + [log_probability]) def testCTCGreedyDecoder(self): """Test two batch entries - best path decoder.""" max_time_steps = 6 # depth == 4 seq_len_0 = 4 input_prob_matrix_0 = np.asarray( [[1.0, 0.0, 0.0, 0.0], # t=0 [0.0, 0.0, 0.4, 0.6], # t=1 [0.0, 0.0, 0.4, 0.6], # t=2 [0.0, 0.9, 0.1, 0.0], # t=3 [0.0, 0.0, 0.0, 0.0], # t=4 (ignored) [0.0, 0.0, 0.0, 0.0]], # t=5 (ignored) dtype=np.float32) input_log_prob_matrix_0 = np.log(input_prob_matrix_0) seq_len_1 = 5 # dimensions are time x depth input_prob_matrix_1 = np.asarray( [ [0.1, 0.9, 0.0, 0.0], # t=0 [0.0, 0.9, 0.1, 0.0], # t=1 [0.0, 0.0, 0.1, 0.9], # t=2 [0.0, 0.9, 0.1, 0.1], # t=3 [0.9, 0.1, 0.0, 0.0], # t=4 [0.0, 0.0, 0.0, 0.0] ], # t=5 (ignored) dtype=np.float32) input_log_prob_matrix_1 = 
np.log(input_prob_matrix_1) # len max_time_steps array of batch_size x depth matrices inputs = [ np.vstack( [input_log_prob_matrix_0[t, :], input_log_prob_matrix_1[t, :]]) for t in range(max_time_steps) ] # batch_size length vector of sequence_lengths seq_lens = np.array([seq_len_0, seq_len_1], dtype=np.int32) # batch_size length vector of negative log probabilities log_prob_truth = np.array([ np.sum(-np.log([1.0, 0.6, 0.6, 0.9])), np.sum(-np.log([0.9, 0.9, 0.9, 0.9, 0.9])) ], np.float32)[:, np.newaxis] # decode_truth: one SparseTensor (ix, vals, shape) decode_truth = [ ( np.array( [ [0, 0], # batch 0, 2 outputs [0, 1], [1, 0], # batch 1, 3 outputs [1, 1], [1, 2] ], dtype=np.int64), np.array( [ 0, 1, # batch 0 1, 1, 0 ], # batch 1 dtype=np.int64), # shape is batch x max_decoded_length np.array( [2, 3], dtype=np.int64)), ] self._testCTCDecoder(ctc_ops.ctc_greedy_decoder, inputs, seq_lens, log_prob_truth, decode_truth) def testCTCDecoderBeamSearch(self): """Test one batch, two beams - hibernating beam search.""" # max_time_steps == 8 depth = 6 seq_len_0 = 5 input_prob_matrix_0 = np.asarray( [ [0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908], [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517], [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763], [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655], [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878], # Random entry added in at time=5 [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671] ], dtype=np.float32) # Add arbitrary offset - this is fine input_log_prob_matrix_0 = np.log(input_prob_matrix_0) + 2.0 # len max_time_steps array of batch_size x depth matrices inputs = ([ input_log_prob_matrix_0[t, :][np.newaxis, :] for t in range(seq_len_0) ] # Pad to max_time_steps = 8 + 2 * [np.zeros( (1, depth), dtype=np.float32)]) # batch_size length vector of sequence_lengths seq_lens = np.array([seq_len_0], dtype=np.int32) # batch_size length vector of negative 
log probabilities log_prob_truth = np.array( [ 0.584855, # output beam 0 0.389139 # output beam 1 ], np.float32)[np.newaxis, :] # decode_truth: two SparseTensors, (ix, values, shape) decode_truth = [ # beam 0, batch 0, two outputs decoded (np.array( [[0, 0], [0, 1]], dtype=np.int64), np.array( [1, 0], dtype=np.int64), np.array( [1, 2], dtype=np.int64)), # beam 1, batch 0, three outputs decoded (np.array( [[0, 0], [0, 1], [0, 2]], dtype=np.int64), np.array( [0, 1, 0], dtype=np.int64), np.array( [1, 3], dtype=np.int64)), ] # Test correct decoding. self._testCTCDecoder( ctc_ops.ctc_beam_search_decoder, inputs, seq_lens, log_prob_truth, decode_truth, beam_width=2, top_paths=2) # Requesting more paths than the beam width allows. with self.assertRaisesRegexp(errors.InvalidArgumentError, (".*requested more paths than the beam " "width.*")): self._testCTCDecoder( ctc_ops.ctc_beam_search_decoder, inputs, seq_lens, log_prob_truth, decode_truth, beam_width=2, top_paths=3) if __name__ == "__main__": test.main()
apache-2.0
tbeadle/docker-py
tests/integration/exec_test.py
12
4673
import pytest from .. import helpers BUSYBOX = helpers.BUSYBOX class ExecTest(helpers.BaseTestCase): def test_execute_command(self): if not helpers.exec_driver_is_native(): pytest.skip('Exec driver not native') container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] self.client.start(id) self.tmp_containers.append(id) res = self.client.exec_create(id, ['echo', 'hello']) self.assertIn('Id', res) exec_log = self.client.exec_start(res) self.assertEqual(exec_log, b'hello\n') def test_exec_command_string(self): if not helpers.exec_driver_is_native(): pytest.skip('Exec driver not native') container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] self.client.start(id) self.tmp_containers.append(id) res = self.client.exec_create(id, 'echo hello world') self.assertIn('Id', res) exec_log = self.client.exec_start(res) self.assertEqual(exec_log, b'hello world\n') def test_exec_command_as_user(self): if not helpers.exec_driver_is_native(): pytest.skip('Exec driver not native') container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] self.client.start(id) self.tmp_containers.append(id) res = self.client.exec_create(id, 'whoami', user='default') self.assertIn('Id', res) exec_log = self.client.exec_start(res) self.assertEqual(exec_log, b'default\n') def test_exec_command_as_root(self): if not helpers.exec_driver_is_native(): pytest.skip('Exec driver not native') container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] self.client.start(id) self.tmp_containers.append(id) res = self.client.exec_create(id, 'whoami') self.assertIn('Id', res) exec_log = self.client.exec_start(res) self.assertEqual(exec_log, b'root\n') def test_exec_command_streaming(self): if not helpers.exec_driver_is_native(): pytest.skip('Exec driver not native') container = self.client.create_container(BUSYBOX, 
'cat', detach=True, stdin_open=True) id = container['Id'] self.tmp_containers.append(id) self.client.start(id) exec_id = self.client.exec_create(id, ['echo', 'hello\nworld']) self.assertIn('Id', exec_id) res = b'' for chunk in self.client.exec_start(exec_id, stream=True): res += chunk self.assertEqual(res, b'hello\nworld\n') def test_exec_start_socket(self): if not helpers.exec_driver_is_native(): pytest.skip('Exec driver not native') container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) container_id = container['Id'] self.client.start(container_id) self.tmp_containers.append(container_id) line = 'yay, interactive exec!' # `echo` appends CRLF, `printf` doesn't exec_id = self.client.exec_create( container_id, ['printf', line], tty=True) self.assertIn('Id', exec_id) socket = self.client.exec_start(exec_id, socket=True) self.addCleanup(socket.close) next_size = helpers.next_packet_size(socket) self.assertEqual(next_size, len(line)) data = helpers.read_data(socket, next_size) self.assertEqual(data.decode('utf-8'), line) def test_exec_inspect(self): if not helpers.exec_driver_is_native(): pytest.skip('Exec driver not native') container = self.client.create_container(BUSYBOX, 'cat', detach=True, stdin_open=True) id = container['Id'] self.client.start(id) self.tmp_containers.append(id) exec_id = self.client.exec_create(id, ['mkdir', '/does/not/exist']) self.assertIn('Id', exec_id) self.client.exec_start(exec_id) exec_info = self.client.exec_inspect(exec_id) self.assertIn('ExitCode', exec_info) self.assertNotEqual(exec_info['ExitCode'], 0)
apache-2.0
moutai/scikit-learn
sklearn/cluster/tests/test_dbscan.py
176
12155
""" Tests for DBSCAN clustering algorithm """ import pickle import numpy as np from scipy.spatial import distance from scipy import sparse from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_not_in from sklearn.neighbors import NearestNeighbors from sklearn.cluster.dbscan_ import DBSCAN from sklearn.cluster.dbscan_ import dbscan from sklearn.cluster.tests.common import generate_clustered_data from sklearn.metrics.pairwise import pairwise_distances n_clusters = 3 X = generate_clustered_data(n_clusters=n_clusters) def test_dbscan_similarity(): # Tests the DBSCAN algorithm with a similarity array. # Parameters chosen specifically for this task. eps = 0.15 min_samples = 10 # Compute similarities D = distance.squareform(distance.pdist(X)) D /= np.max(D) # Compute DBSCAN core_samples, labels = dbscan(D, metric="precomputed", eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples) labels = db.fit(D).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_feature(): # Tests the DBSCAN algorithm with a feature vector array. # Parameters chosen specifically for this task. # Different eps to other test, because distance is not normalised. 
eps = 0.8 min_samples = 10 metric = 'euclidean' # Compute DBSCAN # parameters chosen for task core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples) labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_sparse(): core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8, min_samples=10) core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10) assert_array_equal(core_dense, core_sparse) assert_array_equal(labels_dense, labels_sparse) def test_dbscan_sparse_precomputed(): D = pairwise_distances(X) nn = NearestNeighbors(radius=.9).fit(X) D_sparse = nn.radius_neighbors_graph(mode='distance') # Ensure it is sparse not merely on diagonals: assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1) core_sparse, labels_sparse = dbscan(D_sparse, eps=.8, min_samples=10, metric='precomputed') core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10, metric='precomputed') assert_array_equal(core_dense, core_sparse) assert_array_equal(labels_dense, labels_sparse) def test_dbscan_no_core_samples(): rng = np.random.RandomState(0) X = rng.rand(40, 10) X[X < .8] = 0 for X_ in [X, sparse.csr_matrix(X)]: db = DBSCAN(min_samples=6).fit(X_) assert_array_equal(db.components_, np.empty((0, X_.shape[1]))) assert_array_equal(db.labels_, -1) assert_equal(db.core_sample_indices_.shape, (0,)) def test_dbscan_callable(): # Tests the DBSCAN algorithm with a callable metric. # Parameters chosen specifically for this task. # Different eps to other test, because distance is not normalised. eps = 0.8 min_samples = 10 # metric is the function reference, not the string key. 
metric = distance.euclidean # Compute DBSCAN # parameters chosen for task core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples, algorithm='ball_tree') # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) def test_dbscan_balltree(): # Tests the DBSCAN algorithm with balltree for neighbor calculation. eps = 0.8 min_samples = 10 D = pairwise_distances(X) core_samples, labels = dbscan(D, metric="precomputed", eps=eps, min_samples=min_samples) # number of clusters, ignoring noise if present n_clusters_1 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_1, n_clusters) db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_2 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_2, n_clusters) db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree') labels = db.fit(X).labels_ n_clusters_3 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_3, n_clusters) db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_4 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_4, n_clusters) db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples, algorithm='ball_tree') labels = db.fit(X).labels_ n_clusters_5 = len(set(labels)) - int(-1 in labels) assert_equal(n_clusters_5, n_clusters) def test_input_validation(): # DBSCAN.fit should accept a list of lists. 
X = [[1., 2.], [3., 4.]] DBSCAN().fit(X) # must not raise exception def test_dbscan_badargs(): # Test bad argument values: these should all raise ValueErrors assert_raises(ValueError, dbscan, X, eps=-1.0) assert_raises(ValueError, dbscan, X, algorithm='blah') assert_raises(ValueError, dbscan, X, metric='blah') assert_raises(ValueError, dbscan, X, leaf_size=-1) assert_raises(ValueError, dbscan, X, p=-1) def test_pickle(): obj = DBSCAN() s = pickle.dumps(obj) assert_equal(type(pickle.loads(s)), obj.__class__) def test_boundaries(): # ensure min_samples is inclusive of core point core, _ = dbscan([[0], [1]], eps=2, min_samples=2) assert_in(0, core) # ensure eps is inclusive of circumference core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2) assert_in(0, core) core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2) assert_not_in(0, core) def test_weighted_dbscan(): # ensure sample_weight is validated assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2]) assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4]) # ensure sample_weight has an effect assert_array_equal([], dbscan([[0], [1]], sample_weight=None, min_samples=6)[0]) assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5], min_samples=6)[0]) assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5], min_samples=6)[0]) assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6], min_samples=6)[0]) # points within eps of each other: assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5, sample_weight=[5, 1], min_samples=6)[0]) # and effect of non-positive and non-integer sample_weight: assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0], eps=1.5, min_samples=6)[0]) assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1], eps=1.5, min_samples=6)[0]) assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0], eps=1.5, min_samples=6)[0]) assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1], eps=1.5, min_samples=6)[0]) # 
for non-negative sample_weight, cores should be identical to repetition rng = np.random.RandomState(42) sample_weight = rng.randint(0, 5, X.shape[0]) core1, label1 = dbscan(X, sample_weight=sample_weight) assert_equal(len(label1), len(X)) X_repeated = np.repeat(X, sample_weight, axis=0) core_repeated, label_repeated = dbscan(X_repeated) core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool) core_repeated_mask[core_repeated] = True core_mask = np.zeros(X.shape[0], dtype=bool) core_mask[core1] = True assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask) # sample_weight should work with precomputed distance matrix D = pairwise_distances(X) core3, label3 = dbscan(D, sample_weight=sample_weight, metric='precomputed') assert_array_equal(core1, core3) assert_array_equal(label1, label3) # sample_weight should work with estimator est = DBSCAN().fit(X, sample_weight=sample_weight) core4 = est.core_sample_indices_ label4 = est.labels_ assert_array_equal(core1, core4) assert_array_equal(label1, label4) est = DBSCAN() label5 = est.fit_predict(X, sample_weight=sample_weight) core5 = est.core_sample_indices_ assert_array_equal(core1, core5) assert_array_equal(label1, label5) assert_array_equal(label1, est.labels_) def test_dbscan_core_samples_toy(): X = [[0], [2], [3], [4], [6], [8], [10]] n_samples = len(X) for algorithm in ['brute', 'kd_tree', 'ball_tree']: # Degenerate case: every sample is a core sample, either with its own # cluster or including other close core samples. core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=1) assert_array_equal(core_samples, np.arange(n_samples)) assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4]) # With eps=1 and min_samples=2 only the 3 samples from the denser area # are core samples. All other points are isolated and considered noise. 
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=2) assert_array_equal(core_samples, [1, 2, 3]) assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1]) # Only the sample in the middle of the dense area is core. Its two # neighbors are edge samples. Remaining samples are noise. core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=3) assert_array_equal(core_samples, [2]) assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1]) # It's no longer possible to extract core samples with eps=1: # everything is noise. core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=4) assert_array_equal(core_samples, []) assert_array_equal(labels, -np.ones(n_samples)) def test_dbscan_precomputed_metric_with_degenerate_input_arrays(): # see https://github.com/scikit-learn/scikit-learn/issues/4641 for # more details X = np.eye(10) labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_ assert_equal(len(set(labels)), 1) X = np.zeros((10, 10)) labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_ assert_equal(len(set(labels)), 1)
bsd-3-clause
RobotCaleb/zulip
zerver/management/commands/create_realm.py
114
3832
from __future__ import absolute_import from optparse import make_option from django.conf import settings from django.core.management.base import BaseCommand from zerver.lib.actions import do_create_realm, set_default_streams from zerver.models import RealmAlias if not settings.VOYAGER: from zilencer.models import Deployment import re import sys class Command(BaseCommand): help = """Create a realm for the specified domain. Usage: python manage.py create_realm --domain=foo.com --name='Foo, Inc.'""" option_list = BaseCommand.option_list + ( make_option('-o', '--open-realm', dest='open_realm', action="store_true", default=False, help='Make this an open realm.'), make_option('-d', '--domain', dest='domain', type='str', help='The domain for the realm.'), make_option('-n', '--name', dest='name', type='str', help='The user-visible name for the realm.'), make_option('--deployment', dest='deployment_id', type='int', default=None, help='Optionally, the ID of the deployment you want to associate the realm with.'), ) def validate_domain(self, domain): # Domains can't contain whitespace if they are to be used in memcached # keys. if re.search("\s", domain): raise ValueError("Domains can't contain whitespace") # Domains must look like domains, ie have the structure of # <subdomain(s)>.<tld>. One reason for this is that bots need # to have valid looking emails. 
if len(domain.split(".")) < 2: raise ValueError("Domains must contain a '.'") if RealmAlias.objects.filter(domain=domain).count() > 0: raise ValueError("Cannot create a new realm that is already an alias for an existing realm") def handle(self, *args, **options): if options["domain"] is None or options["name"] is None: print >>sys.stderr, "\033[1;31mPlease provide both a domain and name.\033[0m\n" self.print_help("python manage.py", "create_realm") exit(1) if options["open_realm"] and options["deployment_id"] is not None: print >>sys.stderr, "\033[1;31mExternal deployments cannot be open realms.\033[0m\n" self.print_help("python manage.py", "create_realm") exit(1) if options["deployment_id"] is not None and settings.VOYAGER: print >>sys.stderr, "\033[1;31mExternal deployments are not supported on voyager deployments.\033[0m\n" exit(1) domain = options["domain"] name = options["name"] self.validate_domain(domain) realm, created = do_create_realm( domain, name, restricted_to_domain=not options["open_realm"]) if created: print domain, "created." if options["deployment_id"] is not None: deployment = Deployment.objects.get(id=options["deployment_id"]) deployment.realms.add(realm) deployment.save() print "Added to deployment", str(deployment.id) elif settings.ZULIP_COM: deployment = Deployment.objects.get(base_site_url="https://zulip.com/") deployment.realms.add(realm) deployment.save() # In the else case, we are not using the Deployments feature. set_default_streams(realm, ["social", "engineering"]) print "\033[1;36mDefault streams set to social,engineering,zulip!\033[0m" else: print domain, "already exists."
apache-2.0
ebar0n/django
django/db/migrations/writer.py
38
11085
import os
import re
from importlib import import_module

from django import get_version
from django.apps import apps
from django.db import migrations
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.serializer import serializer_factory
from django.utils.inspect import get_func_args
from django.utils.module_loading import module_dir
from django.utils.timezone import now


class SettingsReference(str):
    """
    Special subclass of string which actually references a current settings
    value. It's treated as the value in memory, but serializes out to a
    settings.NAME attribute reference.
    """

    def __new__(self, value, setting_name):
        return str.__new__(self, value)

    def __init__(self, value, setting_name):
        # Remember which settings attribute this string stands for, so the
        # serializer can emit "settings.<setting_name>" instead of the value.
        self.setting_name = setting_name


class OperationWriter:
    """Serialize a single migration Operation into indented Python source.

    Produces a (rendered_string, imports) pair; `imports` is the set of
    import statements the rendered code requires.
    """

    def __init__(self, operation, indentation=2):
        self.operation = operation
        self.buff = []          # accumulated output lines
        self.indentation = indentation  # current indent level, in 4-space units

    def serialize(self):

        def _write(_arg_name, _arg_value):
            # Arguments listed in serialization_expand_args are written one
            # element per line (dict/list/tuple); everything else is written
            # as a single `name=value,` chunk. Multi-line serialized values
            # are re-flowed so the trailing comma lands on the last line.
            if (_arg_name in self.operation.serialization_expand_args and
                    isinstance(_arg_value, (list, tuple, dict))):
                if isinstance(_arg_value, dict):
                    self.feed('%s={' % _arg_name)
                    self.indent()
                    for key, value in _arg_value.items():
                        key_string, key_imports = MigrationWriter.serialize(key)
                        arg_string, arg_imports = MigrationWriter.serialize(value)
                        args = arg_string.splitlines()
                        if len(args) > 1:
                            self.feed('%s: %s' % (key_string, args[0]))
                            for arg in args[1:-1]:
                                self.feed(arg)
                            self.feed('%s,' % args[-1])
                        else:
                            self.feed('%s: %s,' % (key_string, arg_string))
                        imports.update(key_imports)
                        imports.update(arg_imports)
                    self.unindent()
                    self.feed('},')
                else:
                    self.feed('%s=[' % _arg_name)
                    self.indent()
                    for item in _arg_value:
                        arg_string, arg_imports = MigrationWriter.serialize(item)
                        args = arg_string.splitlines()
                        if len(args) > 1:
                            for arg in args[:-1]:
                                self.feed(arg)
                            self.feed('%s,' % args[-1])
                        else:
                            self.feed('%s,' % arg_string)
                        imports.update(arg_imports)
                    self.unindent()
                    self.feed('],')
            else:
                arg_string, arg_imports = MigrationWriter.serialize(_arg_value)
                args = arg_string.splitlines()
                if len(args) > 1:
                    self.feed('%s=%s' % (_arg_name, args[0]))
                    for arg in args[1:-1]:
                        self.feed(arg)
                    self.feed('%s,' % args[-1])
                else:
                    self.feed('%s=%s,' % (_arg_name, arg_string))
                imports.update(arg_imports)

        imports = set()
        name, args, kwargs = self.operation.deconstruct()
        operation_args = get_func_args(self.operation.__init__)

        # See if this operation is in django.db.migrations. If it is,
        # We can just use the fact we already have that imported,
        # otherwise, we need to add an import for the operation class.
        if getattr(migrations, name, None) == self.operation.__class__:
            self.feed('migrations.%s(' % name)
        else:
            imports.add('import %s' % (self.operation.__class__.__module__))
            self.feed('%s.%s(' % (self.operation.__class__.__module__, name))

        self.indent()

        # Positional args first, matched to parameter names by position.
        for i, arg in enumerate(args):
            arg_value = arg
            arg_name = operation_args[i]
            _write(arg_name, arg_value)

        i = len(args)
        # Only iterate over remaining arguments
        for arg_name in operation_args[i:]:
            if arg_name in kwargs:  # Don't sort to maintain signature order
                arg_value = kwargs[arg_name]
                _write(arg_name, arg_value)

        self.unindent()
        self.feed('),')
        return self.render(), imports

    def indent(self):
        self.indentation += 1

    def unindent(self):
        self.indentation -= 1

    def feed(self, line):
        # Each indent level is 4 spaces.
        self.buff.append(' ' * (self.indentation * 4) + line)

    def render(self):
        return '\n'.join(self.buff)


class MigrationWriter:
    """
    Take a Migration instance and is able to produce the contents
    of the migration file from it.
    """

    def __init__(self, migration):
        self.migration = migration
        # Set to True when an import of a function from another migration
        # file is detected (such code must be ported by hand).
        self.needs_manual_porting = False

    def as_string(self):
        """Return a string of the file contents."""
        items = {
            "replaces_str": "",
            "initial_str": "",
        }

        imports = set()

        # Deconstruct operations
        operations = []
        for operation in self.migration.operations:
            operation_string, operation_imports = OperationWriter(operation).serialize()
            imports.update(operation_imports)
            operations.append(operation_string)
        items["operations"] = "\n".join(operations) + "\n" if operations else ""

        # Format dependencies and write out swappable dependencies right
        dependencies = []
        for dependency in self.migration.dependencies:
            if dependency[0] == "__setting__":
                dependencies.append("        migrations.swappable_dependency(settings.%s)," % dependency[1])
                imports.add("from django.conf import settings")
            else:
                dependencies.append("        %s," % self.serialize(dependency)[0])
        items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else ""

        # Format imports nicely, swapping imports of functions from migration files
        # for comments
        migration_imports = set()
        for line in list(imports):
            # Imports of the form "import app.migrations.0001_xxx" mean code
            # was defined inside another migration module; flag for porting.
            if re.match(r"^import (.*)\.\d+[^\s]*$", line):
                migration_imports.add(line.split("import")[1].strip())
                imports.remove(line)
                self.needs_manual_porting = True

        # django.db.migrations is always used, but models import may not be.
        # If models import exists, merge it with migrations import.
        if "from django.db import models" in imports:
            imports.discard("from django.db import models")
            imports.add("from django.db import migrations, models")
        else:
            imports.add("from django.db import migrations")

        # Sort imports by the package / module to be imported (the part after
        # "from" in "from ... import ..." or after "import" in "import ...").
        sorted_imports = sorted(imports, key=lambda i: i.split()[1])
        items["imports"] = "\n".join(sorted_imports) + "\n" if imports else ""
        if migration_imports:
            items["imports"] += (
                "\n\n# Functions from the following migrations need manual "
                "copying.\n# Move them and any dependencies into this file, "
                "then update the\n# RunPython operations to refer to the local "
                "versions:\n# %s"
            ) % "\n# ".join(sorted(migration_imports))

        # If there's a replaces, make a string for it
        if self.migration.replaces:
            items['replaces_str'] = "\n    replaces = %s\n" % self.serialize(self.migration.replaces)[0]
        # Hinting that goes into comment
        items.update(
            version=get_version(),
            timestamp=now().strftime("%Y-%m-%d %H:%M"),
        )

        if self.migration.initial:
            items['initial_str'] = "\n    initial = True\n"

        return MIGRATION_TEMPLATE % items

    @property
    def basedir(self):
        """Return the directory the migration file should be written to,
        creating a package for custom MIGRATION_MODULES paths if needed.

        Raises ValueError when migrations are disabled for the app or no
        suitable location can be found/created.
        """
        migrations_package_name, _ = MigrationLoader.migrations_module(self.migration.app_label)

        if migrations_package_name is None:
            raise ValueError(
                "Django can't create migrations for app '%s' because "
                "migrations have been disabled via the MIGRATION_MODULES "
                "setting." % self.migration.app_label
            )

        # See if we can import the migrations module directly
        try:
            migrations_module = import_module(migrations_package_name)
        except ImportError:
            pass
        else:
            try:
                return module_dir(migrations_module)
            except ValueError:
                pass

        # Alright, see if it's a direct submodule of the app
        app_config = apps.get_app_config(self.migration.app_label)
        maybe_app_name, _, migrations_package_basename = migrations_package_name.rpartition(".")
        if app_config.name == maybe_app_name:
            return os.path.join(app_config.path, migrations_package_basename)

        # In case of using MIGRATION_MODULES setting and the custom package
        # doesn't exist, create one, starting from an existing package
        existing_dirs, missing_dirs = migrations_package_name.split("."), []
        while existing_dirs:
            missing_dirs.insert(0, existing_dirs.pop(-1))
            try:
                base_module = import_module(".".join(existing_dirs))
            except ImportError:
                continue
            else:
                try:
                    base_dir = module_dir(base_module)
                except ValueError:
                    continue
                else:
                    break
        else:
            raise ValueError(
                "Could not locate an appropriate location to create "
                "migrations package %s. Make sure the toplevel "
                "package exists and can be imported." %
                migrations_package_name)

        final_dir = os.path.join(base_dir, *missing_dirs)
        if not os.path.isdir(final_dir):
            os.makedirs(final_dir)
        for missing_dir in missing_dirs:
            base_dir = os.path.join(base_dir, missing_dir)
            # Touch an empty __init__.py so the new directory is a package.
            with open(os.path.join(base_dir, "__init__.py"), "w"):
                pass

        return final_dir

    @property
    def filename(self):
        return "%s.py" % self.migration.name

    @property
    def path(self):
        return os.path.join(self.basedir, self.filename)

    @classmethod
    def serialize(cls, value):
        # Delegate to the per-type serializer registry; returns
        # (string_representation, set_of_required_imports).
        return serializer_factory(value).serialize()


MIGRATION_TEMPLATE = """\
# Generated by Django %(version)s on %(timestamp)s

%(imports)s

class Migration(migrations.Migration):
%(replaces_str)s%(initial_str)s
    dependencies = [
%(dependencies)s\
    ]

    operations = [
%(operations)s\
    ]
"""
bsd-3-clause
MysterionRise/fantazy-predictor
enriching_data.py
1
10896
#!/usr/local/bin/python
# -*- coding: utf-8 -*-

# Fantasy scoring rules (translated from the original Russian comments):
#
# Participation: 2 points if the player was on court for 10+ minutes,
# 1 point otherwise.
#
# Win:  +3 points (away game), +2 points (home game).
# Loss: -3 points (home game), -2 points (away game).
#
# Stat line:
#   + points + assists + steals + rebounds + blocks
#   + made free throws + made field goals
#   - free-throw attempts - field-goal attempts
#   - 2 * turnovers - personal fouls

import calendar
import os
from datetime import datetime

import pandas as pd


def convert_to_sec(time_str):
    """Convert an 'MM:SS' string to seconds. NaN/None counts as 0.

    NOTE(review): on a malformed string this prints diagnostics and falls
    through, returning None (original behavior, preserved).
    """
    if pd.isnull(time_str):
        return 0
    try:
        m, s = time_str.split(':')
        return int(m) * 60 + int(s)
    except Exception as inst:
        print(time_str)
        print(type(inst))  # the exception instance
        print(inst.args)   # arguments stored in .args


def get_sec(row):
    """Seconds played for one box-score row (its 'minutes' column)."""
    time_str = row['minutes']
    return convert_to_sec(time_str)


def getOrDefault(value):
    """Return int(value), treating NaN/None as 0."""
    if pd.isnull(value):
        return 0
    return int(value)


def extractYear(row):
    return row['date'].year


def extractMonth(row):
    return row['date'].month


def extractDay(row):
    return row['date'].day


# concat1..concat6 build categorical interaction keys such as
# "<opponent><year>" used as model features downstream.
def concat1(row):
    return row['opponent'] + str(row['year'])


def concat2(row):
    return row['team'] + str(row['year'])


def concat3(row):
    return row['name'] + str(row['year'])


def concat4(row):
    return row['opponent'] + str(row['month']) + str(row['year'])


def concat5(row):
    return row['team'] + str(row['month']) + str(row['year'])


def concat6(row):
    return row['name'] + str(row['month']) + str(row['year'])


def getDayOfTheWeek(row):
    """Three-letter English weekday name ('Mon', 'Tue', ...) of row['date']."""
    day = calendar.day_name[row['date'].weekday()]
    return day[:3]


def convert_age(row):
    """Convert a 'YY-DDD' age string into a fractional number of years."""
    if pd.isnull(row['age']):
        return 0
    years, days = row['age'].split('-')
    return int(years) + 1.0 * int(days) / 365


def split_result(x):
    """Split e.g. 'W (+10)' into ('W', '+10'). Prints and returns None on error."""
    try:
        sp = x.split('(')
        return sp[0].strip(), sp[1][:-1]
    except Exception as inst:
        print(x)
        print(type(inst))  # the exception instance
        print(inst.args)   # arguments stored in .args


# (FG + 0.5 * 3P) / FGA
def calc_efg(row):
    """Effective field-goal percentage; 0.0 when no attempts."""
    try:
        fg = row['fg']
        fg3 = row['fg3']
        fga = row['fga']
        if fga == 0:
            return 0.0
        return (fg + 0.5 * fg3) / fga
    except Exception as inst:
        print(row)
        print(type(inst))  # the exception instance
        print(inst.args)   # arguments stored in .args


def calc_fantasy(row):
    """Compute the fantasy score of one box-score row per the rules above.

    Returns 0 for rows with no 'minutes' value (player did not play).
    """
    if pd.isnull(row['minutes']):
        return 0
    fantasy_points = 0
    # Participation bonus.
    if convert_to_sec(row['minutes']) >= 10 * 60:
        fantasy_points += 2
    else:
        fantasy_points += 1
    # Win/loss bonus; '@' marks an away game.
    if 'W' in str(row['result']):
        if row['location'] == '@':
            fantasy_points += 3
        else:
            fantasy_points += 2
    else:
        if row['location'] == '@':
            fantasy_points -= 2
        else:
            fantasy_points -= 3
    fantasy_points += getOrDefault(row['pts'])
    fantasy_points += getOrDefault(row['ast'])
    fantasy_points += getOrDefault(row['stl'])
    fantasy_points += getOrDefault(row['trb'])
    fantasy_points += getOrDefault(row['blk'])
    fantasy_points += getOrDefault(row['ft'])
    fantasy_points += getOrDefault(row['fg'])
    fantasy_points -= getOrDefault(row['fta'])
    fantasy_points -= getOrDefault(row['fga'])
    fantasy_points -= 2 * getOrDefault(row['tov'])
    fantasy_points -= getOrDefault(row['pf'])
    return fantasy_points


def enrich_player_df(df):
    """Add fantasy score, calendar, interaction and rolling-window features
    to a per-player game-log DataFrame (mutates and returns `df`)."""
    df['fantasy_points'] = df.apply(lambda row: calc_fantasy(row), axis=1)
    df['year'] = df.apply(lambda row: extractYear(row), axis=1)
    df['month'] = df.apply(lambda row: extractMonth(row), axis=1)
    df['day'] = df.apply(lambda row: extractDay(row), axis=1)
    df['opponent2'] = df.apply(lambda row: concat1(row), axis=1)
    df['opponent3'] = df.apply(lambda row: concat4(row), axis=1)
    df['team3'] = df.apply(lambda row: concat5(row), axis=1)
    df['name3'] = df.apply(lambda row: concat6(row), axis=1)
    df['name2'] = df.apply(lambda row: concat3(row), axis=1)
    df['team2'] = df.apply(lambda row: concat2(row), axis=1)
    df['age1'] = df.apply(lambda row: convert_age(row), axis=1)
    df['seconds'] = df.apply(lambda row: get_sec(row), axis=1)
    for i in range(1, 6):
        df['mean_pts_' + str(i)] = df['pts'].rolling(i).mean().shift(1)
    df['efg'] = df.apply(lambda row: calc_efg(row), axis=1)
    df['mefg'] = df['efg'].expanding().mean().shift(1)
    df['day_of_the_week'] = df.apply(lambda row: getDayOfTheWeek(row), axis=1)
    # Expanding means are shifted by one game so each row only sees the past.
    df['mfp'] = df['fantasy_points'].expanding().mean().shift(1)
    df['medfp'] = df['fantasy_points'].expanding().median().shift(1)
    df['msec'] = df['seconds'].expanding().mean().shift(1)
    df['mpts'] = df['pts'].expanding().mean().shift(1)
    df['mast'] = df['ast'].expanding().mean().shift(1)
    df['mtrb'] = df['trb'].expanding().mean().shift(1)
    df['mstl'] = df['stl'].expanding().mean().shift(1)
    df['mpf'] = df['pf'].expanding().mean().shift(1)
    df['mtov'] = df['tov'].expanding().mean().shift(1)
    df['mblk'] = df['blk'].expanding().mean().shift(1)
    df['mfg'] = df['fg'].expanding().mean().shift(1)
    df['mfg3'] = df['fg3'].expanding().mean().shift(1)
    df['mft'] = df['ft'].expanding().mean().shift(1)
    df['mfg3_pct'] = df['fg3_pct'].expanding().mean().shift(1)
    df['mfg_pct'] = df['fg_pct'].expanding().mean().shift(1)
    df['mft_pct'] = df['ft_pct'].expanding().mean().shift(1)
    # number of games in last 5 days
    df['rest_days'] = df['date'].diff().apply(lambda x: x.days)
    for i in [1, 7, 10, 11, 12]:
        df['mean_rest_days_' + str(i)] = df['rest_days'].rolling(i).mean().shift(1)
    for i in [10, 21, 31, 38, 39]:
        df['mean_fantasy_' + str(i)] = df['fantasy_points'].rolling(i).mean().shift(1)
    for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
        df['mean_sec_' + str(i)] = df['seconds'].rolling(i).mean().shift(1)
    for i in [3, 4, 12, 16, 17, 28, 36]:
        df['skew_fantasy_' + str(i)] = df['fantasy_points'].rolling(i).skew().shift(1)
    return df


def enrich_player_df_for_upcoming_games(df):
    """Same feature engineering as enrich_player_df.

    The original implementation was a byte-for-byte duplicate of
    enrich_player_df; it now delegates to it. The name is kept for
    backward compatibility with existing callers.
    """
    return enrich_player_df(df)


# pd.datetime was removed in pandas 1.0; use datetime.strptime directly.
dateparse = lambda x: datetime.strptime(x, '%Y-%m-%d')


def enrich_all_data():
    """Walk the "nba" tree, enrich every eligible 2018 game-log CSV and write
    the result next to it under a "fantasy" subdirectory."""
    for root, dirs, files in os.walk("nba"):
        for file in files:
            if file.endswith(".csv"):
                try:
                    path = os.path.join(root, file)
                    # Skip files already under fantasy/; only process 2018 logs.
                    if path.find('fantasy') == -1 and path.find('2018.csv') != -1:
                        f = open(path)
                        print(path)
                        lines = f.readlines()
                        if len(lines) > 1:
                            df = pd.read_csv(path, parse_dates=['date'], date_parser=dateparse)
                            if not df.empty:
                                df.fillna(df.mean(), inplace=True)
                                df = enrich_player_df(df)
                                join = os.path.join(root, "fantasy")
                                if not os.path.exists(join):
                                    os.mkdir(join)
                                df.to_csv(os.path.join(root, "fantasy", file), index=False)
                except Exception as inst:
                    # NOTE(review): `df` may be unbound here if the failure
                    # happened before read_csv — preserved original behavior.
                    print(file)
                    print(df.head())
                    print(type(inst))  # the exception instance
                    print(inst.args)   # arguments stored in .args
mit
vasili-v/themis
vendor/github.com/apache/thrift/test/py.tornado/test_suite.py
10
6403
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

import datetime
import glob
import os
import sys
import time
import unittest

# Make the generated tornado stubs and the locally built thrift library
# importable before anything from thrift is imported.
basepath = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, basepath + '/gen-py.tornado')
sys.path.insert(0, glob.glob(os.path.join(basepath, '../../lib/py/build/lib*'))[0])

# Gracefully skip the whole suite when tornado isn't installed.
try:
    __import__('tornado')
except ImportError:
    print("module `tornado` not found, skipping test")
    sys.exit(0)

from tornado import gen
from tornado.testing import AsyncTestCase, get_unused_port, gen_test

from thrift import TTornado
from thrift.Thrift import TApplicationException
from thrift.protocol import TBinaryProtocol

from ThriftTest import ThriftTest
from ThriftTest.ttypes import Xception, Xtruct


class TestHandler(object):
    """Server-side implementation of the ThriftTest service.

    Most methods simply echo their argument; a few raise deliberately to
    exercise the exception paths of the tornado transport.
    """

    def __init__(self, test_instance):
        self.test_instance = test_instance

    def testVoid(self):
        pass

    def testString(self, s):
        if s == 'unexpected_error':
            raise Exception(s)
        return s

    def testByte(self, b):
        return b

    def testI16(self, i16):
        return i16

    def testI32(self, i32):
        return i32

    def testI64(self, i64):
        return i64

    def testDouble(self, dub):
        return dub

    def testBinary(self, thing):
        return thing

    def testStruct(self, thing):
        return thing

    def testException(self, s):
        # 'Xception' raises a declared thrift exception; 'throw_undeclared'
        # raises a plain Python exception that surfaces to the client as a
        # TApplicationException.
        if s == 'Xception':
            x = Xception()
            x.errorCode = 1001
            x.message = s
            raise x
        elif s == 'throw_undeclared':
            raise ValueError('testing undeclared exception')

    def testOneway(self, seconds):
        start = time.time()

        def fire_oneway():
            end = time.time()
            self.test_instance.stop((start, end, seconds))

        self.test_instance.io_loop.add_timeout(
            datetime.timedelta(seconds=seconds),
            fire_oneway)
        # Oneway calls have no response channel, so this exception must be
        # swallowed by the server without affecting the client.
        raise Exception('testing exception in oneway method')

    def testNest(self, thing):
        return thing

    @gen.coroutine
    def testMap(self, thing):
        # Implemented as a coroutine on purpose, to verify that handlers may
        # yield and return via gen.Return().
        yield gen.moment
        raise gen.Return(thing)

    def testSet(self, thing):
        return thing

    def testList(self, thing):
        return thing

    def testEnum(self, thing):
        return thing

    def testTypedef(self, thing):
        return thing


class ThriftTestCase(AsyncTestCase):
    """End-to-end tests running a tornado thrift server and client in-process."""

    def setUp(self):
        super(ThriftTestCase, self).setUp()
        self.port = get_unused_port()

        # server
        self.handler = TestHandler(self)
        self.processor = ThriftTest.Processor(self.handler)
        self.pfactory = TBinaryProtocol.TBinaryProtocolFactory()

        self.server = TTornado.TTornadoServer(self.processor, self.pfactory,
                                              io_loop=self.io_loop)
        self.server.bind(self.port)
        self.server.start(1)

        # client
        transport = TTornado.TTornadoStreamTransport('localhost', self.port,
                                                     io_loop=self.io_loop)
        pfactory = TBinaryProtocol.TBinaryProtocolFactory()
        self.io_loop.run_sync(transport.open)
        self.client = ThriftTest.Client(transport, pfactory)

    @gen_test
    def test_void(self):
        v = yield self.client.testVoid()
        self.assertEqual(v, None)

    @gen_test
    def test_string(self):
        v = yield self.client.testString('Python')
        self.assertEqual(v, 'Python')

    @gen_test
    def test_byte(self):
        v = yield self.client.testByte(63)
        self.assertEqual(v, 63)

    @gen_test
    def test_i32(self):
        v = yield self.client.testI32(-1)
        self.assertEqual(v, -1)

        v = yield self.client.testI32(0)
        self.assertEqual(v, 0)

    @gen_test
    def test_i64(self):
        v = yield self.client.testI64(-34359738368)
        self.assertEqual(v, -34359738368)

    @gen_test
    def test_double(self):
        v = yield self.client.testDouble(-5.235098235)
        self.assertEqual(v, -5.235098235)

    @gen_test
    def test_struct(self):
        x = Xtruct()
        x.string_thing = "Zero"
        x.byte_thing = 1
        x.i32_thing = -3
        x.i64_thing = -5

        y = yield self.client.testStruct(x)
        self.assertEqual(y.string_thing, "Zero")
        self.assertEqual(y.byte_thing, 1)
        self.assertEqual(y.i32_thing, -3)
        self.assertEqual(y.i64_thing, -5)

    @gen_test
    def test_oneway(self):
        # Fire-and-forget; a follow-up call proves the connection survived
        # the exception raised inside the oneway handler.
        self.client.testOneway(1)
        v = yield self.client.testI32(-1)
        self.assertEqual(v, -1)

    @gen_test
    def test_map(self):
        """
        TestHandler.testMap is a coroutine, this test checks if gen.Return()
        from a coroutine works.
        """
        expected = {1: 1}
        res = yield self.client.testMap(expected)
        self.assertEqual(res, expected)

    @gen_test
    def test_exception(self):
        try:
            yield self.client.testException('Xception')
        except Xception as ex:
            self.assertEqual(ex.errorCode, 1001)
            self.assertEqual(ex.message, 'Xception')
        else:
            self.fail("should have gotten exception")
        try:
            yield self.client.testException('throw_undeclared')
        except TApplicationException:
            pass
        else:
            self.fail("should have gotten exception")

        yield self.client.testException('Safe')


def suite():
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    suite.addTest(loader.loadTestsFromTestCase(ThriftTestCase))
    return suite


if __name__ == '__main__':
    unittest.TestProgram(defaultTest='suite',
                         testRunner=unittest.TextTestRunner(verbosity=1))
apache-2.0
Universal-Model-Converter/UMC3.0a
data/Python/x86/Lib/test/test_imp.py
91
2339
"""Tests for the `imp` module's import-lock and reload() behavior.

NOTE: Python 2 only — relies on `test.test_support` and the py2-only
`thread` module.
"""
import imp
import unittest
from test import test_support


class LockTests(unittest.TestCase):

    """Very basic test of import lock functions."""

    def verify_lock_state(self, expected):
        # Helper: assert the current global import-lock state.
        self.assertEqual(imp.lock_held(), expected,
                         "expected imp.lock_held() to be %r" % expected)

    def testLock(self):
        LOOPS = 50

        # The import lock may already be held, e.g. if the test suite is run
        # via "import test.autotest".
        lock_held_at_start = imp.lock_held()
        self.verify_lock_state(lock_held_at_start)

        # The lock is re-entrant: acquire it LOOPS times ...
        for i in range(LOOPS):
            imp.acquire_lock()
            self.verify_lock_state(True)

        # ... and release it the same number of times.
        for i in range(LOOPS):
            imp.release_lock()

        # The original state should be restored now.
        self.verify_lock_state(lock_held_at_start)

        # One extra release without a matching acquire must fail.
        if not lock_held_at_start:
            try:
                imp.release_lock()
            except RuntimeError:
                pass
            else:
                self.fail("release_lock() without lock should raise "
                          "RuntimeError")


class ReloadTests(unittest.TestCase):

    """Very basic tests to make sure that imp.reload() operates just like
    reload()."""

    def test_source(self):
        # XXX (ncoghlan): It would be nice to use test_support.CleanImport
        # here, but that breaks because the os module registers some
        # handlers in copy_reg on import. Since CleanImport doesn't
        # revert that registration, the module is left in a broken
        # state after reversion. Reinitialising the module contents
        # and just reverting os.environ to its previous state is an OK
        # workaround
        with test_support.EnvironmentVarGuard():
            import os
            imp.reload(os)

    def test_extension(self):
        with test_support.CleanImport('time'):
            import time
            imp.reload(time)

    def test_builtin(self):
        with test_support.CleanImport('marshal'):
            import marshal
            imp.reload(marshal)


def test_main():
    tests = [
        ReloadTests,
    ]
    # LockTests only makes sense when the interpreter has threading.
    try:
        import thread
    except ImportError:
        pass
    else:
        tests.append(LockTests)
    test_support.run_unittest(*tests)

if __name__ == "__main__":
    test_main()
mit
ULHPC/easybuild-framework
easybuild/scripts/repo_setup.py
3
3206
#!/usr/bin/env python
##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild.  If not, see <http://www.gnu.org/licenses/>.
##
"""
This script creates the directory structure used by easybuild
(https://github.com/easybuild/easybuild)

You can use this to set up your private repo with easyblocks and easyconfigs directories

Usage: repo_setup.py

Note: you might want to put this directory under revision control.

:author: Stijn De Weirdt (Ghent University)
:author: Dries Verdegem (Ghent University)
:author: Kenneth Hoste (Ghent University)
:author: Pieter De Baets (Ghent University)
:author: Jens Timmerman (Ghent University)
"""
# NOTE: Python 2 only (`xrange`, "except ..., err" syntax below).
import os
import sys


def create_dir(prefix, dirname, withinit=False, init_txt=''):
    # Create prefix/dirname; optionally seed it with an __init__.py
    # containing init_txt so it becomes a Python package.
    os.mkdir(os.path.join(prefix, dirname))
    if withinit:
        fh = open(os.path.join(prefix, dirname, "__init__.py"), 'w')
        fh.write(init_txt)
        fh.close()


def create_subdirs(prefix):
    # create subdirectories a, b, ..., z, 0 (catchall)
    alphabet = [chr(x) for x in xrange(ord('a'), ord('z') + 1)]
    for letter in alphabet:
        create_dir(prefix, letter)
    create_dir(prefix, "0")
    create_dir(prefix, "_generic_")


#
# MAIN
#
# NOTE(review): this prints a usage message when extra arguments are given
# but does not exit — execution continues regardless. Confirm intent.
if len(sys.argv) > 1:
    sys.stderr.write("Usage: %s\n" % sys.argv[0])

try:
    # create root dir 'easybuild' and change into it
    dirname = "easybuild"
    os.mkdir(dirname)
    os.chdir(dirname)

    # create easyblocks dir and subdirs, with default init
    dirname = "easyblocks"
    os.mkdir(dirname)
    # NOTE(review): init_txt is built here but never passed to create_subdirs
    # / create_dir (withinit defaults to False), so no __init__.py files are
    # actually written — looks like a latent bug; confirm against upstream.
    init_txt = """import os
from pkgutil import extend_path

# Extend path so python finds our easyblocks in the subdirectories where they are located
subdirs = [chr(l) for l in range(ord('a'),ord('z')+1)] + ['0', '_generic_']
__path__.extend([os.path.join(__path__[0], subdir) for subdir in subdirs])

# And let python know this is not the only place to look for them,
# so we can have 2 easybuild/easyblock paths in your pythonpath, one for public, one for private easyblocks.
__path__ = extend_path(__path__, __name__)
"""
    create_subdirs(dirname)

    # create easyconfigs dir and subdirs
    dirname = "easyconfigs"
    os.mkdir(dirname)
    create_subdirs(dirname)
except (IOError, OSError), err:
    sys.stderr.write("Repo setup failed: %s" % err)
    sys.exit(1)
gpl-2.0
Gixugif/CDRecording
Call_Detail_Record.py
1
2248
#!/usr/bin/python
# -*- coding: utf-8 -*-

#  Title: Call_Detail_Record
#  Description: Class for one CDR separately.
#  Date: 6/9/16
#  Author: Jeffrey Zic


class Call_Detail_Record:

    """Call Detail Records contain metadata for phone calls.

    Every field starts out as the empty string and is expected to be
    populated later from a CDR data source.
    """

    def __init__(self):
        # Bug fix: all fields below were previously initialised to the
        # 1-tuple ('',) because of stray trailing commas (e.g.
        # ``self.billsec = ('', )``). The final field (read_codec) showed
        # the intended value — a plain empty string — so every field now
        # consistently starts as ''.
        self.bbx_cdr_id = ''
        self.network_addr = ''
        self.bbx_fax_inbound_id = ''
        self.billsec = ''
        self.original_callee_id_name = ''
        self.end_timestamp = ''
        self.direction = ''
        self.destination_name = ''
        self.transfer_source = ''
        self.original_callee_id_number = ''
        self.write_rate = ''
        self.transfer_to = ''
        self.write_codec = ''
        self.context = ''
        self.callee_bbx_phone_id = ''
        self.destination_number = ''
        self.caller_id_number = ''
        self.caller_bbx_phone_registration_id = ''
        self.hangup_cause = ''
        self.original_caller_id_number = ''
        self.gateway_name = ''
        self.record_file_name = ''
        self.callee_bbx_user_id = ''
        self.record_file_checksum = ''
        self.caller_bbx_phone_id = ''
        self.duration = ''
        self.callee_bbx_phone_registration_id = ''
        self.answer_timestamp = ''
        self.hangup_originator = ''
        self.transfer_history = ''
        self.call_type = ''
        self.source_table = ''
        self.bbx_queue_id = ''
        self.hold_events = ''
        self.start_timestamp = ''
        self.uuid = ''
        self.record_keep_days = ''
        self.bbx_fax_outbound_id = ''
        self.bleg_uuid = ''
        self.bbx_callflow_id = ''
        self.destination_list = ''
        self.caller_id_name = ''
        self.click_to_call_uuid = ''
        self.read_rate = ''
        self.original_caller_id_name = ''
        self.recording_retention = ''
        self.caller_bbx_user_id = ''
        self.destination_type = ''
        self.outbound_route = ''
        self.processed = ''
        self.accountcode = ''
        self.read_codec = ''
gpl-3.0
amisrs/one-eighty
venv2/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
762
3532
import hashlib import os from pip._vendor.lockfile import LockFile from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile from ..cache import BaseCache from ..controller import CacheController def _secure_open_write(filename, fmode): # We only want to write to this file, so open it in write only mode flags = os.O_WRONLY # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only # will open *new* files. # We specify this because we want to ensure that the mode we pass is the # mode of the file. flags |= os.O_CREAT | os.O_EXCL # Do not follow symlinks to prevent someone from making a symlink that # we follow and insecurely open a cache file. if hasattr(os, "O_NOFOLLOW"): flags |= os.O_NOFOLLOW # On Windows we'll mark this file as binary if hasattr(os, "O_BINARY"): flags |= os.O_BINARY # Before we open our file, we want to delete any existing file that is # there try: os.remove(filename) except (IOError, OSError): # The file must not exist already, so we can just skip ahead to opening pass # Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a # race condition happens between the os.remove and this line, that an # error will be raised. Because we utilize a lockfile this should only # happen if someone is attempting to attack us. 
fd = os.open(filename, flags, fmode) try: return os.fdopen(fd, "wb") except: # An error occurred wrapping our FD in a file object os.close(fd) raise class FileCache(BaseCache): def __init__(self, directory, forever=False, filemode=0o0600, dirmode=0o0700, use_dir_lock=None, lock_class=None): if use_dir_lock is not None and lock_class is not None: raise ValueError("Cannot use use_dir_lock and lock_class together") if use_dir_lock: lock_class = MkdirLockFile if lock_class is None: lock_class = LockFile self.directory = directory self.forever = forever self.filemode = filemode self.dirmode = dirmode self.lock_class = lock_class @staticmethod def encode(x): return hashlib.sha224(x.encode()).hexdigest() def _fn(self, name): # NOTE: This method should not change as some may depend on it. # See: https://github.com/ionrock/cachecontrol/issues/63 hashed = self.encode(name) parts = list(hashed[:5]) + [hashed] return os.path.join(self.directory, *parts) def get(self, key): name = self._fn(key) if not os.path.exists(name): return None with open(name, 'rb') as fh: return fh.read() def set(self, key, value): name = self._fn(key) # Make sure the directory exists try: os.makedirs(os.path.dirname(name), self.dirmode) except (IOError, OSError): pass with self.lock_class(name) as lock: # Write our actual file with _secure_open_write(lock.path, self.filemode) as fh: fh.write(value) def delete(self, key): name = self._fn(key) if not self.forever: os.remove(name) def url_to_file_path(url, filecache): """Return the file cache path based on the URL. This does not ensure the file exists! """ key = CacheController.cache_url(url) return filecache._fn(key)
mit
horance-liu/tensorflow
tensorflow/contrib/distributions/python/ops/gumbel.py
65
7694
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The Gumbel distribution class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops.distributions import distribution class _Gumbel(distribution.Distribution): """The scalar Gumbel distribution with location `loc` and `scale` parameters. #### Mathematical details The probability density function (pdf) of this distribution is, ```none pdf(x; mu, sigma) = exp(-(x - mu) / sigma - exp(-(x - mu) / sigma)) ``` where `loc = mu` and `scale = sigma`. 
The cumulative density function of this distribution is, ```cdf(x; mu, sigma) = exp(-exp(-(x - mu) / sigma))``` The Gumbel distribution is a member of the [location-scale family]( https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be constructed as, ```none X ~ Gumbel(loc=0, scale=1) Y = loc + scale * X ``` #### Examples Examples of initialization of one or a batch of distributions. ```python # Define a single scalar Gumbel distribution. dist = tf.contrib.distributions.Gumbel(loc=0., scale=3.) # Evaluate the cdf at 1, returning a scalar. dist.cdf(1.) # Define a batch of two scalar valued Gumbels. # The first has mean 1 and scale 11, the second 2 and 22. dist = tf.contrib.distributions.Gumbel(loc=[1, 2.], scale=[11, 22.]) # Evaluate the pdf of the first distribution on 0, and the second on 1.5, # returning a length two tensor. dist.prob([0, 1.5]) # Get 3 samples, returning a 3 x 2 tensor. dist.sample([3]) ``` Arguments are broadcast when possible. ```python # Define a batch of two scalar valued Logistics. # Both have mean 1, but different scales. dist = tf.contrib.distributions.Gumbel(loc=1., scale=[11, 22.]) # Evaluate the pdf of both distributions on the same point, 3.0, # returning a length 2 tensor. dist.prob(3.0) ``` """ def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name="Gumbel"): """Construct Gumbel distributions with location and scale `loc` and `scale`. The parameters `loc` and `scale` must be shaped in a way that supports broadcasting (e.g. `loc + scale` is a valid operation). Args: loc: Floating point tensor, the means of the distribution(s). scale: Floating point tensor, the scales of the distribution(s). scale must contain only positive values. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. 
When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: TypeError: if loc and scale are different dtypes. """ parameters = locals() with ops.name_scope(name, values=[loc, scale]): with ops.control_dependencies([check_ops.assert_positive(scale)] if validate_args else []): self._loc = array_ops.identity(loc, name="loc") self._scale = array_ops.identity(scale, name="scale") check_ops.assert_same_float_dtype([self._loc, self._scale]) super(_Gumbel, self).__init__( dtype=self._scale.dtype, reparameterization_type=distribution.FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._loc, self._scale], name=name) @staticmethod def _param_shapes(sample_shape): return dict( zip(("loc", "scale"), ([ops.convert_to_tensor( sample_shape, dtype=dtypes.int32)] * 2))) @property def loc(self): """Distribution parameter for the location.""" return self._loc @property def scale(self): """Distribution parameter for scale.""" return self._scale def _batch_shape_tensor(self): return array_ops.broadcast_dynamic_shape( array_ops.shape(self.loc), array_ops.shape(self.scale)) def _batch_shape(self): return array_ops.broadcast_static_shape( self.loc.get_shape(), self.scale.get_shape()) def _event_shape_tensor(self): return constant_op.constant([], dtype=dtypes.int32) def _event_shape(self): return tensor_shape.scalar() def _sample_n(self, n, seed=None): # Uniform variates must be sampled from the open-interval `(0, 1)` rather # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny` # because it is the smallest, positive, "normal" number. A "normal" number # is such that the mantissa has an implicit leading 1. 
Normal, positive # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In # this case, a subnormal number (i.e., np.nextafter) can cause us to sample # 0. uniform = random_ops.random_uniform( shape=array_ops.concat([[n], self.batch_shape_tensor()], 0), minval=np.finfo(self.dtype.as_numpy_dtype).tiny, maxval=1., dtype=self.dtype, seed=seed) sampled = -math_ops.log(-math_ops.log(uniform)) return sampled * self.scale + self.loc def _log_prob(self, x): return self._log_unnormalized_prob(x) - self._log_normalization() def _prob(self, x): return math_ops.exp(self._log_prob(x)) def _log_cdf(self, x): return -math_ops.exp(-self._z(x)) def _cdf(self, x): return math_ops.exp(-math_ops.exp(-self._z(x))) def _log_unnormalized_prob(self, x): z = self._z(x) return - z - math_ops.exp(-z) def _log_normalization(self): return math_ops.log(self.scale) def _entropy(self): # Use broadcasting rules to calculate the full broadcast sigma. scale = self.scale * array_ops.ones_like(self.loc) return 1 + math_ops.log(scale) + np.euler_gamma def _mean(self): return self.loc + self.scale * np.euler_gamma def _stddev(self): return self.scale * array_ops.ones_like(self.loc) * math.pi / math.sqrt(6) def _mode(self): return self.loc * array_ops.ones_like(self.scale) def _z(self, x): """Standardize input `x` to a unit logistic.""" with ops.name_scope("standardize", values=[x]): return (x - self.loc) / self.scale
apache-2.0
marinkaz/orange3
Orange/classification/naive_bayes.py
2
2294
import numpy as np from Orange.classification import Learner, Model from Orange.data import Instance, Storage, Table, DiscreteVariable from Orange.statistics import contingency from Orange.preprocess import Discretize __all__ = ["NaiveBayesLearner"] class NaiveBayesLearner(Learner): """ Naive Bayes classifier. Works only with discrete attributes. By default, continuous attributes are discretized. Parameters ---------- preprocessors : list, optional (default="[Orange.preprocess.Discretize]") An ordered list of preprocessors applied to data before training or testing. """ name = 'naive bayes' preprocessors = [Discretize()] def fit_storage(self, table): if not isinstance(table, Storage): raise TypeError("Data is not a subclass of Orange.data.Storage.") if not all(var.is_discrete for var in table.domain.variables): raise NotImplementedError("Only discrete variables are supported.") cont = contingency.get_contingencies(table) class_freq = np.array(np.diag( contingency.get_contingency(table, table.domain.class_var))) return NaiveBayesModel(cont, class_freq, table.domain) class NaiveBayesModel(Model): def __init__(self, cont, class_freq, domain): super().__init__(domain) self.cont = cont self.class_freq = class_freq def predict_storage(self, data): if isinstance(data, Instance): data = [data] ncv = len(self.domain.class_var.values) probs = np.zeros((len(data), ncv)) for i, ins in enumerate(data): for c in range(ncv): py = (1 + self.class_freq[c]) / (ncv + sum(self.class_freq)) log_prob = np.log(py) for ai, a in enumerate(self.domain.attributes): if not np.isnan(ins[a]): relevant = 1 + self.cont[ai][c][a.to_val(ins[a])] total = len(a.values) + self.class_freq[c] log_prob += np.log(relevant / total) probs[i, c] = log_prob np.exp(probs, out=probs) probs /= probs.sum(axis=1)[:, None] values = probs.argmax(axis=1) return values, probs
bsd-2-clause
steveb/heat
heat/tests/openstack/neutron/test_neutron_metering.py
4
11163
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutronclient.common import exceptions from neutronclient.v2_0 import client as neutronclient import six from heat.common import exception from heat.common import template_format from heat.engine.resources.openstack.neutron import metering from heat.engine import scheduler from heat.tests import common from heat.tests import utils metering_template = ''' heat_template_version: 2015-04-30 description: Template to test metering resources resources: label: type: OS::Neutron::MeteringLabel properties: name: TestLabel description: Description of TestLabel shared: True rule: type: OS::Neutron::MeteringRule properties: metering_label_id: { get_resource: label } remote_ip_prefix: 10.0.3.0/24 direction: ingress excluded: false ''' class MeteringLabelTest(common.HeatTestCase): def setUp(self): super(MeteringLabelTest, self).setUp() self.m.StubOutWithMock(neutronclient.Client, 'create_metering_label') self.m.StubOutWithMock(neutronclient.Client, 'delete_metering_label') self.m.StubOutWithMock(neutronclient.Client, 'show_metering_label') self.m.StubOutWithMock(neutronclient.Client, 'create_metering_label_rule') self.m.StubOutWithMock(neutronclient.Client, 'delete_metering_label_rule') self.m.StubOutWithMock(neutronclient.Client, 'show_metering_label_rule') def create_metering_label(self): neutronclient.Client.create_metering_label({ 'metering_label': { 'name': 'TestLabel', 'description': 'Description of TestLabel', 'shared': True} 
}).AndReturn({'metering_label': {'id': '1234'}}) snippet = template_format.parse(metering_template) self.stack = utils.parse_stack(snippet) resource_defns = self.stack.t.resource_definitions(self.stack) return metering.MeteringLabel( 'label', resource_defns['label'], self.stack) def test_create(self): rsrc = self.create_metering_label() self.m.ReplayAll() scheduler.TaskRunner(rsrc.create)() self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state) self.m.VerifyAll() def test_create_failed(self): neutronclient.Client.create_metering_label({ 'metering_label': { 'name': 'TestLabel', 'description': 'Description of TestLabel', 'shared': True} }).AndRaise(exceptions.NeutronClientException()) self.m.ReplayAll() snippet = template_format.parse(metering_template) stack = utils.parse_stack(snippet) resource_defns = stack.t.resource_definitions(stack) rsrc = metering.MeteringLabel( 'label', resource_defns['label'], stack) error = self.assertRaises(exception.ResourceFailure, scheduler.TaskRunner(rsrc.create)) self.assertEqual( 'NeutronClientException: resources.label: ' 'An unknown exception occurred.', six.text_type(error)) self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state) self.m.VerifyAll() def test_delete(self): neutronclient.Client.delete_metering_label('1234') neutronclient.Client.show_metering_label('1234').AndRaise( exceptions.NeutronClientException(status_code=404)) rsrc = self.create_metering_label() self.m.ReplayAll() scheduler.TaskRunner(rsrc.create)() scheduler.TaskRunner(rsrc.delete)() self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state) self.m.VerifyAll() def test_delete_already_gone(self): neutronclient.Client.delete_metering_label('1234').AndRaise( exceptions.NeutronClientException(status_code=404)) rsrc = self.create_metering_label() self.m.ReplayAll() scheduler.TaskRunner(rsrc.create)() scheduler.TaskRunner(rsrc.delete)() self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state) self.m.VerifyAll() def test_delete_failed(self): 
neutronclient.Client.delete_metering_label('1234').AndRaise( exceptions.NeutronClientException(status_code=400)) rsrc = self.create_metering_label() self.m.ReplayAll() scheduler.TaskRunner(rsrc.create)() error = self.assertRaises(exception.ResourceFailure, scheduler.TaskRunner(rsrc.delete)) self.assertEqual( 'NeutronClientException: resources.label: ' 'An unknown exception occurred.', six.text_type(error)) self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state) self.m.VerifyAll() def test_attribute(self): rsrc = self.create_metering_label() neutronclient.Client.show_metering_label('1234').MultipleTimes( ).AndReturn( {'metering_label': {'name': 'TestLabel', 'description': 'Description of TestLabel', 'shared': True}}) self.m.ReplayAll() scheduler.TaskRunner(rsrc.create)() self.assertEqual('TestLabel', rsrc.FnGetAtt('name')) self.assertEqual('Description of TestLabel', rsrc.FnGetAtt('description')) self.assertTrue(rsrc.FnGetAtt('shared')) self.m.VerifyAll() class MeteringRuleTest(common.HeatTestCase): def setUp(self): super(MeteringRuleTest, self).setUp() self.m.StubOutWithMock(neutronclient.Client, 'create_metering_label') self.m.StubOutWithMock(neutronclient.Client, 'delete_metering_label') self.m.StubOutWithMock(neutronclient.Client, 'show_metering_label') self.m.StubOutWithMock(neutronclient.Client, 'create_metering_label_rule') self.m.StubOutWithMock(neutronclient.Client, 'delete_metering_label_rule') self.m.StubOutWithMock(neutronclient.Client, 'show_metering_label_rule') def create_metering_label_rule(self): neutronclient.Client.create_metering_label_rule({ 'metering_label_rule': { 'metering_label_id': '1234', 'remote_ip_prefix': '10.0.3.0/24', 'direction': 'ingress', 'excluded': False} }).AndReturn({'metering_label_rule': {'id': '5678'}}) snippet = template_format.parse(metering_template) self.stack = utils.parse_stack(snippet) self.patchobject(self.stack['label'], 'FnGetRefId', return_value='1234') resource_defns = 
self.stack.t.resource_definitions(self.stack) return metering.MeteringRule( 'rule', resource_defns['rule'], self.stack) def test_create(self): rsrc = self.create_metering_label_rule() self.m.ReplayAll() scheduler.TaskRunner(rsrc.create)() self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state) self.m.VerifyAll() def test_create_failed(self): neutronclient.Client.create_metering_label_rule({ 'metering_label_rule': { 'metering_label_id': '1234', 'remote_ip_prefix': '10.0.3.0/24', 'direction': 'ingress', 'excluded': False} }).AndRaise(exceptions.NeutronClientException()) self.m.ReplayAll() snippet = template_format.parse(metering_template) stack = utils.parse_stack(snippet) self.patchobject(stack['label'], 'FnGetRefId', return_value='1234') resource_defns = stack.t.resource_definitions(stack) rsrc = metering.MeteringRule( 'rule', resource_defns['rule'], stack) error = self.assertRaises(exception.ResourceFailure, scheduler.TaskRunner(rsrc.create)) self.assertEqual( 'NeutronClientException: resources.rule: ' 'An unknown exception occurred.', six.text_type(error)) self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state) self.m.VerifyAll() def test_delete(self): neutronclient.Client.delete_metering_label_rule('5678') neutronclient.Client.show_metering_label_rule('5678').AndRaise( exceptions.NeutronClientException(status_code=404)) rsrc = self.create_metering_label_rule() self.m.ReplayAll() scheduler.TaskRunner(rsrc.create)() scheduler.TaskRunner(rsrc.delete)() self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state) self.m.VerifyAll() def test_delete_already_gone(self): neutronclient.Client.delete_metering_label_rule('5678').AndRaise( exceptions.NeutronClientException(status_code=404)) rsrc = self.create_metering_label_rule() self.m.ReplayAll() scheduler.TaskRunner(rsrc.create)() scheduler.TaskRunner(rsrc.delete)() self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state) self.m.VerifyAll() def test_delete_failed(self): 
neutronclient.Client.delete_metering_label_rule('5678').AndRaise( exceptions.NeutronClientException(status_code=400)) rsrc = self.create_metering_label_rule() self.m.ReplayAll() scheduler.TaskRunner(rsrc.create)() error = self.assertRaises(exception.ResourceFailure, scheduler.TaskRunner(rsrc.delete)) self.assertEqual( 'NeutronClientException: resources.rule: ' 'An unknown exception occurred.', six.text_type(error)) self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state) self.m.VerifyAll() def test_attribute(self): rsrc = self.create_metering_label_rule() neutronclient.Client.show_metering_label_rule('5678').MultipleTimes( ).AndReturn( {'metering_label_rule': {'metering_label_id': '1234', 'remote_ip_prefix': '10.0.3.0/24', 'direction': 'ingress', 'excluded': False}}) self.m.ReplayAll() scheduler.TaskRunner(rsrc.create)() self.assertEqual('10.0.3.0/24', rsrc.FnGetAtt('remote_ip_prefix')) self.assertEqual('ingress', rsrc.FnGetAtt('direction')) self.assertIs(False, rsrc.FnGetAtt('excluded')) self.m.VerifyAll()
apache-2.0
jspargo/AneMo
django/lib/python2.7/site-packages/django/contrib/formtools/tests/wizard/namedwizardtests/tests.py
49
16581
from __future__ import unicode_literals import copy from django.core.urlresolvers import reverse from django.http import QueryDict from django.test import TestCase from django.utils._os import upath from django.contrib.auth.models import User from django.contrib.auth.tests.utils import skipIfCustomUser from django.contrib.formtools.wizard.views import (NamedUrlSessionWizardView, NamedUrlCookieWizardView) from django.contrib.formtools.tests.wizard.test_forms import get_request, Step1, Step2 from .forms import temp_storage # On Python 2, __file__ may end with .pyc THIS_FILE = upath(__file__).rstrip("c") UPLOADED_FILE_NAME = 'tests.py' class NamedWizardTests(object): urls = 'django.contrib.formtools.tests.wizard.namedwizardtests.urls' def setUp(self): self.testuser, created = User.objects.get_or_create(username='testuser1') # Get new step data, since we modify it during the tests. self.wizard_step_data = copy.deepcopy(self.wizard_step_data) self.wizard_step_data[0]['form1-user'] = self.testuser.pk def tearDown(self): # Ensure that there are no files in the storage which could lead to false # results in the next tests. Deleting the whole storage dir is not really # an option since the storage is defined on the module level and can't be # easily reinitialized. (FIXME: The tests here should use the view classes # directly instead of the test client, then the storage issues would go # away too.) 
for file in temp_storage.listdir('')[1]: temp_storage.delete(file) def test_initial_call(self): response = self.client.get(reverse('%s_start' % self.wizard_urlname)) self.assertEqual(response.status_code, 302) response = self.client.get(response.url) self.assertEqual(response.status_code, 200) wizard = response.context['wizard'] self.assertEqual(wizard['steps'].current, 'form1') self.assertEqual(wizard['steps'].step0, 0) self.assertEqual(wizard['steps'].step1, 1) self.assertEqual(wizard['steps'].last, 'form4') self.assertEqual(wizard['steps'].prev, None) self.assertEqual(wizard['steps'].next, 'form2') self.assertEqual(wizard['steps'].count, 4) self.assertEqual(wizard['url_name'], self.wizard_urlname) def test_initial_call_with_params(self): get_params = {'getvar1': 'getval1', 'getvar2': 'getval2'} response = self.client.get(reverse('%s_start' % self.wizard_urlname), get_params) self.assertEqual(response.status_code, 302) # Test for proper redirect GET parameters location = response.url self.assertNotEqual(location.find('?'), -1) querydict = QueryDict(location[location.find('?') + 1:]) self.assertEqual(dict(querydict.items()), get_params) def test_form_post_error(self): response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': 'form1'}), self.wizard_step_1_data) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form1') self.assertEqual(response.context['wizard']['form'].errors, {'name': ['This field is required.'], 'user': ['This field is required.']}) def test_form_post_success(self): response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': 'form1'}), self.wizard_step_data[0]) response = self.client.get(response.url) self.assertEqual(response.status_code, 200) wizard = response.context['wizard'] self.assertEqual(wizard['steps'].current, 'form2') self.assertEqual(wizard['steps'].step0, 1) self.assertEqual(wizard['steps'].prev, 'form1') self.assertEqual(wizard['steps'].next, 
'form3') def test_form_stepback(self): response = self.client.get( reverse(self.wizard_urlname, kwargs={'step': 'form1'})) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form1') response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': 'form1'}), self.wizard_step_data[0]) response = self.client.get(response.url) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form2') response = self.client.post( reverse(self.wizard_urlname, kwargs={ 'step': response.context['wizard']['steps'].current }), {'wizard_goto_step': response.context['wizard']['steps'].prev}) response = self.client.get(response.url) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form1') def test_form_jump(self): response = self.client.get( reverse(self.wizard_urlname, kwargs={'step': 'form1'})) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form1') response = self.client.get( reverse(self.wizard_urlname, kwargs={'step': 'form3'})) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form3') def test_form_finish(self): response = self.client.get( reverse(self.wizard_urlname, kwargs={'step': 'form1'})) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form1') response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[0]) response = self.client.get(response.url) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form2') post_data = self.wizard_step_data[1] with open(THIS_FILE, 'rb') as post_file: post_data['form2-file1'] = post_file response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': 
response.context['wizard']['steps'].current}), post_data) response = self.client.get(response.url) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form3') # Check that the file got uploaded properly. with open(THIS_FILE, 'rb') as f, temp_storage.open(UPLOADED_FILE_NAME) as f2: self.assertEqual(f.read(), f2.read()) response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[2]) response = self.client.get(response.url) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form4') response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[3]) response = self.client.get(response.url) self.assertEqual(response.status_code, 200) # After the wizard is done no files should exist anymore. self.assertFalse(temp_storage.exists(UPLOADED_FILE_NAME)) all_data = response.context['form_list'] del all_data[1]['file1'] self.assertEqual(all_data, [ {'name': 'Pony', 'thirsty': True, 'user': self.testuser}, {'address1': '123 Main St', 'address2': 'Djangoland'}, {'random_crap': 'blah blah'}, [{'random_crap': 'blah blah'}, {'random_crap': 'blah blah'}]]) def test_cleaned_data(self): response = self.client.get( reverse(self.wizard_urlname, kwargs={'step': 'form1'})) self.assertEqual(response.status_code, 200) response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[0]) response = self.client.get(response.url) self.assertEqual(response.status_code, 200) post_data = self.wizard_step_data[1] with open(THIS_FILE, 'rb') as post_file: post_data['form2-file1'] = post_file response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), post_data) response = 
self.client.get(response.url) self.assertEqual(response.status_code, 200) self.assertTrue(temp_storage.exists(UPLOADED_FILE_NAME)) step2_url = reverse(self.wizard_urlname, kwargs={'step': 'form2'}) response = self.client.get(step2_url) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form2') with open(THIS_FILE, 'rb') as f, temp_storage.open(UPLOADED_FILE_NAME) as f2: self.assertEqual(f.read(), f2.read()) response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[2]) response = self.client.get(response.url) self.assertEqual(response.status_code, 200) response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[3]) response = self.client.get(response.url) self.assertEqual(response.status_code, 200) all_data = response.context['all_cleaned_data'] self.assertEqual(all_data['file1'].name, UPLOADED_FILE_NAME) self.assertTrue(all_data['file1'].closed) self.assertFalse(temp_storage.exists(UPLOADED_FILE_NAME)) del all_data['file1'] self.assertEqual( all_data, {'name': 'Pony', 'thirsty': True, 'user': self.testuser, 'address1': '123 Main St', 'address2': 'Djangoland', 'random_crap': 'blah blah', 'formset-form4': [ {'random_crap': 'blah blah'}, {'random_crap': 'blah blah'} ]}) form_dict = response.context['form_dict'] self.assertIn('form1', form_dict.keys()) self.assertIn('form2', form_dict.keys()) self.assertEqual(form_dict['form1'].cleaned_data, response.context['form_list'][0]) def test_manipulated_data(self): response = self.client.get( reverse(self.wizard_urlname, kwargs={'step': 'form1'})) self.assertEqual(response.status_code, 200) response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[0]) response = self.client.get(response.url) 
self.assertEqual(response.status_code, 200) post_data = self.wizard_step_data[1] with open(THIS_FILE, 'rb') as post_file: post_data['form2-file1'] = post_file response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), post_data) response = self.client.get(response.url) self.assertEqual(response.status_code, 200) response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[2]) loc = response.url response = self.client.get(loc) self.assertEqual(response.status_code, 200, loc) self.client.cookies.pop('sessionid', None) self.client.cookies.pop('wizard_cookie_contact_wizard', None) response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': response.context['wizard']['steps'].current}), self.wizard_step_data[3]) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form1') def test_form_reset(self): response = self.client.post( reverse(self.wizard_urlname, kwargs={'step': 'form1'}), self.wizard_step_data[0]) response = self.client.get(response.url) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form2') response = self.client.get( '%s?reset=1' % reverse('%s_start' % self.wizard_urlname)) self.assertEqual(response.status_code, 302) response = self.client.get(response.url) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['wizard']['steps'].current, 'form1') @skipIfCustomUser class NamedSessionWizardTests(NamedWizardTests, TestCase): wizard_urlname = 'nwiz_session' wizard_step_1_data = { 'session_contact_wizard-current_step': 'form1', } wizard_step_data = ( { 'form1-name': 'Pony', 'form1-thirsty': '2', 'session_contact_wizard-current_step': 'form1', }, { 'form2-address1': '123 Main St', 'form2-address2': 'Djangoland', 'session_contact_wizard-current_step': 'form2', }, { 
'form3-random_crap': 'blah blah', 'session_contact_wizard-current_step': 'form3', }, { 'form4-INITIAL_FORMS': '0', 'form4-TOTAL_FORMS': '2', 'form4-MAX_NUM_FORMS': '0', 'form4-0-random_crap': 'blah blah', 'form4-1-random_crap': 'blah blah', 'session_contact_wizard-current_step': 'form4', } ) @skipIfCustomUser class NamedCookieWizardTests(NamedWizardTests, TestCase): wizard_urlname = 'nwiz_cookie' wizard_step_1_data = { 'cookie_contact_wizard-current_step': 'form1', } wizard_step_data = ( { 'form1-name': 'Pony', 'form1-thirsty': '2', 'cookie_contact_wizard-current_step': 'form1', }, { 'form2-address1': '123 Main St', 'form2-address2': 'Djangoland', 'cookie_contact_wizard-current_step': 'form2', }, { 'form3-random_crap': 'blah blah', 'cookie_contact_wizard-current_step': 'form3', }, { 'form4-INITIAL_FORMS': '0', 'form4-TOTAL_FORMS': '2', 'form4-MAX_NUM_FORMS': '0', 'form4-0-random_crap': 'blah blah', 'form4-1-random_crap': 'blah blah', 'cookie_contact_wizard-current_step': 'form4', } ) class NamedFormTests(object): urls = 'django.contrib.formtools.tests.wizard.namedwizardtests.urls' def test_revalidation(self): request = get_request() testform = self.formwizard_class.as_view( [('start', Step1), ('step2', Step2)], url_name=self.wizard_urlname) response, instance = testform(request, step='done') instance.render_done(None) self.assertEqual(instance.storage.current_step, 'start') class TestNamedUrlSessionWizardView(NamedUrlSessionWizardView): def dispatch(self, request, *args, **kwargs): response = super(TestNamedUrlSessionWizardView, self).dispatch(request, *args, **kwargs) return response, self class TestNamedUrlCookieWizardView(NamedUrlCookieWizardView): def dispatch(self, request, *args, **kwargs): response = super(TestNamedUrlCookieWizardView, self).dispatch(request, *args, **kwargs) return response, self @skipIfCustomUser class NamedSessionFormTests(NamedFormTests, TestCase): formwizard_class = TestNamedUrlSessionWizardView wizard_urlname = 'nwiz_session' 
@skipIfCustomUser class NamedCookieFormTests(NamedFormTests, TestCase): formwizard_class = TestNamedUrlCookieWizardView wizard_urlname = 'nwiz_cookie'
gpl-2.0
MSOpenTech/edx-platform
common/djangoapps/student/migrations/0005_name_change.py
188
10730
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'PendingEmailChange' db.create_table('student_pendingemailchange', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)), ('new_email', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, blank=True)), ('activation_key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=32, db_index=True)), )) db.send_create_signal('student', ['PendingEmailChange']) # Adding model 'PendingNameChange' db.create_table('student_pendingnamechange', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)), ('new_name', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)), ('rationale', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)), )) db.send_create_signal('student', ['PendingNameChange']) # Changing field 'UserProfile.user' db.alter_column('auth_userprofile', 'user_id', self.gf('django.db.models.fields.related.OneToOneField')(unique=True, to=orm['auth.User'])) def backwards(self, orm): # Deleting model 'PendingEmailChange' db.delete_table('student_pendingemailchange') # Deleting model 'PendingNameChange' db.delete_table('student_pendingnamechange') # Changing field 'UserProfile.user' db.alter_column('auth_userprofile', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)) models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': 
('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}), 'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}), 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'gravatar': 
('django.db.models.fields.CharField', [], {'max_length': '32'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}), 'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'student.pendingemailchange': { 'Meta': {'object_name': 'PendingEmailChange'}, 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.pendingnamechange': { 'Meta': {'object_name': 'PendingNameChange'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.registration': { 'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"}, 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': 
"orm['auth.User']", 'unique': 'True'}) }, 'student.userprofile': { 'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"}, 'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'meta': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}) }, 'student.usertestgroup': { 'Meta': {'object_name': 'UserTestGroup'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'}) } } complete_apps = ['student']
agpl-3.0
t3dev/odoo
addons/test_mass_mailing/tests/test_blacklist_mixin.py
8
3038
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from unittest.mock import patch from odoo.tests.common import users from odoo.addons.test_mass_mailing.tests import common from odoo.addons.test_mass_mailing.models.mass_mail_test import MassMailTestBlacklist from odoo.exceptions import AccessError, UserError class TestBLMixin(common.MassMailingCase): @classmethod def setUpClass(cls): super(TestBLMixin, cls).setUpClass() cls.env['mail.blacklist'].create([{ 'email': 'Arya.Stark@example.com', 'active': True, }, { 'email': 'Sansa.Stark@example.com', 'active': False, }]) @users('emp') def test_bl_mixin_primary_field_consistency(self): MassMailTestBlacklist._primary_email = 'not_a_field' with self.assertRaises(UserError): self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)]) MassMailTestBlacklist._primary_email = ['not_a_str'] with self.assertRaises(UserError): self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)]) MassMailTestBlacklist._primary_email = 'email_from' self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)]) @users('emp') def test_bl_mixin_is_blacklisted(self): """ Test is_blacklisted field computation """ record = self.env['mass.mail.test.bl'].create({'email_from': 'arya.stark@example.com'}) self.assertTrue(record.is_blacklisted) record = self.env['mass.mail.test.bl'].create({'email_from': 'not.arya.stark@example.com'}) self.assertFalse(record.is_blacklisted) @users('emp') def test_bl_mixin_search_blacklisted(self): """ Test is_blacklisted field search implementation """ record1 = self.env['mass.mail.test.bl'].create({'email_from': 'arya.stark@example.com'}) record2 = self.env['mass.mail.test.bl'].create({'email_from': 'not.arya.stark@example.com'}) search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', False)]) self.assertEqual(search_res, record2) search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '!=', True)]) 
self.assertEqual(search_res, record2) search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', True)]) self.assertEqual(search_res, record1) search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '!=', False)]) self.assertEqual(search_res, record1) @users('emp') def test_bl_mixin_search_blacklisted_format(self): """ Test is_blacklisted field search using email parsing """ record1 = self.env['mass.mail.test.bl'].create({'email_from': 'Arya Stark <arya.stark@example.com>'}) self.assertTrue(record1.is_blacklisted) search_res = self.env['mass.mail.test.bl'].search([('is_blacklisted', '=', True)]) self.assertEqual(search_res, record1)
gpl-3.0
kprkpr/platform_external_gtest
test/gtest_xml_test_utils.py
42
6871
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test utilities for gtest_xml_output""" __author__ = 'eefacm@gmail.com (Sean Mcafee)' import re import unittest from xml.dom import minidom, Node GTEST_OUTPUT_FLAG = "--gtest_output" GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml" class GTestXMLTestCase(unittest.TestCase): """ Base class for tests of Google Test's XML output functionality. 
""" def AssertEquivalentNodes(self, expected_node, actual_node): """ Asserts that actual_node (a DOM node object) is equivalent to expected_node (another DOM node object), in that either both of them are CDATA nodes and have the same value, or both are DOM elements and actual_node meets all of the following conditions: * It has the same tag name as expected_node. * It has the same set of attributes as expected_node, each with the same value as the corresponding attribute of expected_node. An exception is any attribute named "time", which needs only be convertible to a floating-point number. * It has an equivalent set of child nodes (including elements and CDATA sections) as expected_node. Note that we ignore the order of the children as they are not guaranteed to be in any particular order. """ if expected_node.nodeType == Node.CDATA_SECTION_NODE: self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType) self.assertEquals(expected_node.nodeValue, actual_node.nodeValue) return self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType) self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType) self.assertEquals(expected_node.tagName, actual_node.tagName) expected_attributes = expected_node.attributes actual_attributes = actual_node .attributes self.assertEquals(expected_attributes.length, actual_attributes.length) for i in range(expected_attributes.length): expected_attr = expected_attributes.item(i) actual_attr = actual_attributes.get(expected_attr.name) self.assert_(actual_attr is not None) self.assertEquals(expected_attr.value, actual_attr.value) expected_children = self._GetChildren(expected_node) actual_children = self._GetChildren(actual_node) self.assertEquals(len(expected_children), len(actual_children)) for child_id, child in expected_children.iteritems(): self.assert_(child_id in actual_children, '<%s> is not in <%s>' % (child_id, actual_children)) self.AssertEquivalentNodes(child, actual_children[child_id]) identifying_attribute = { "testsuite": 
"name", "testcase": "name", "failure": "message", } def _GetChildren(self, element): """ Fetches all of the child nodes of element, a DOM Element object. Returns them as the values of a dictionary keyed by the IDs of the children. For <testsuite> and <testcase> elements, the ID is the value of their "name" attribute; for <failure> elements, it is the value of the "message" attribute; for CDATA section node, it is "detail". An exception is raised if any element other than the above four is encountered, if two child elements with the same identifying attributes are encountered, or if any other type of node is encountered, other than Text nodes containing only whitespace. """ children = {} for child in element.childNodes: if child.nodeType == Node.ELEMENT_NODE: self.assert_(child.tagName in self.identifying_attribute, "Encountered unknown element <%s>" % child.tagName) childID = child.getAttribute(self.identifying_attribute[child.tagName]) self.assert_(childID not in children) children[childID] = child elif child.nodeType == Node.TEXT_NODE: self.assert_(child.nodeValue.isspace()) elif child.nodeType == Node.CDATA_SECTION_NODE: self.assert_("detail" not in children) children["detail"] = child else: self.fail("Encountered unexpected node type %d" % child.nodeType) return children def NormalizeXml(self, element): """ Normalizes Google Test's XML output to eliminate references to transient information that may change from run to run. * The "time" attribute of <testsuite> and <testcase> elements is replaced with a single asterisk, if it contains only digit characters. * The line number reported in the first line of the "message" attribute of <failure> elements is replaced with a single asterisk. * The directory names in file paths are removed. * The stack traces are removed. 
""" if element.tagName in ("testsuite", "testcase"): time = element.getAttributeNode("time") time.value = re.sub(r"^\d+(\.\d+)?$", "*", time.value) elif element.tagName == "failure": for child in element.childNodes: if child.nodeType == Node.CDATA_SECTION_NODE: # Removes the source line number. cdata = re.sub(r"^.*/(.*:)\d+\n", "\\1*\n", child.nodeValue) # Removes the actual stack trace. child.nodeValue = re.sub(r"\nStack trace:\n(.|\n)*", "", cdata) for child in element.childNodes: if child.nodeType == Node.ELEMENT_NODE: self.NormalizeXml(child)
bsd-3-clause
ghtmtt/QGIS
tests/src/python/test_qgsmergedfeaturerenderer.py
19
7103
# -*- coding: utf-8 -*- """QGIS Unit tests for QgsMergedFeatureRenderer .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Nyall Dawson' __date__ = '30/12/2020' __copyright__ = 'Copyright 2020, The QGIS Project' import qgis # NOQA import os from qgis.PyQt.QtCore import QSize, QDir, Qt from qgis.PyQt.QtGui import QColor from qgis.core import (QgsRenderChecker, QgsMapSettings, QgsVectorLayer, QgsMergedFeatureRenderer, QgsSingleSymbolRenderer, QgsFillSymbol, QgsSimpleFillSymbolLayer, QgsCategorizedSymbolRenderer, QgsRendererCategory, QgsSimpleLineSymbolLayer, QgsMarkerLineSymbolLayer, QgsLineSymbol, QgsTemplatedLineSymbolLayerBase, QgsMarkerSymbol, QgsMarkerSymbolLayer ) from qgis.testing import unittest from utilities import unitTestDataPath TEST_DATA_DIR = unitTestDataPath() class TestQgsMergedFeatureRenderer(unittest.TestCase): def setUp(self): self.report = "<h1>Python QgsMergedFeatureRenderer Tests</h1>\n" def tearDown(self): report_file_path = "%s/qgistest.html" % QDir.tempPath() with open(report_file_path, 'a') as report_file: report_file.write(self.report) def testSinglePolys(self): source = QgsVectorLayer(os.path.join(TEST_DATA_DIR, 'polys_overlapping.shp')) self.assertTrue(source.isValid()) map_settings = QgsMapSettings() map_settings.setExtent(source.extent()) map_settings.setDestinationCrs(source.crs()) map_settings.setLayers([source]) layer = QgsSimpleFillSymbolLayer() layer.setStrokeColor(QColor(0, 0, 0)) layer.setStrokeWidth(1) layer.setColor(QColor(200, 250, 50)) symbol = QgsFillSymbol([layer]) sub_renderer = QgsSingleSymbolRenderer(symbol) source.setRenderer(QgsMergedFeatureRenderer(sub_renderer)) self.assertTrue(self.imageCheck('single_subrenderer', 'single_subrenderer', map_settings)) def testCategorizedPolys(self): source = 
QgsVectorLayer(os.path.join(TEST_DATA_DIR, 'polys_overlapping_with_cat.shp')) self.assertTrue(source.isValid()) map_settings = QgsMapSettings() map_settings.setExtent(source.extent()) map_settings.setDestinationCrs(source.crs()) map_settings.setLayers([source]) layer = QgsSimpleFillSymbolLayer() layer.setStrokeColor(QColor(0, 0, 0)) layer.setStrokeWidth(1) layer.setColor(QColor(200, 250, 50, 150)) symbol1 = QgsFillSymbol() symbol1.changeSymbolLayer(0, layer) layer = QgsSimpleFillSymbolLayer() layer.setStrokeColor(QColor(0, 0, 0)) layer.setStrokeWidth(1) layer.setColor(QColor(50, 250, 200, 150)) symbol2 = QgsFillSymbol() symbol2.changeSymbolLayer(0, layer) sub_renderer = QgsCategorizedSymbolRenderer('cat', [QgsRendererCategory('cat1', symbol1, 'cat1'), QgsRendererCategory('cat2', symbol2, 'cat2') ]) source.setRenderer(QgsMergedFeatureRenderer(sub_renderer)) self.assertTrue(self.imageCheck('polys_categorizedrenderer', 'polys_categorizedrenderer', map_settings)) def testSingleLines(self): source = QgsVectorLayer(os.path.join(TEST_DATA_DIR, 'lines_touching.shp')) self.assertTrue(source.isValid()) map_settings = QgsMapSettings() map_settings.setExtent(source.extent().buffered(2)) map_settings.setDestinationCrs(source.crs()) map_settings.setLayers([source]) layer = QgsSimpleLineSymbolLayer() layer.setColor(QColor(0, 0, 0)) layer.setWidth(1) symbol = QgsLineSymbol([layer]) layer2 = QgsMarkerLineSymbolLayer() layer2.setPlacement(QgsTemplatedLineSymbolLayerBase.FirstVertex) marker = QgsMarkerSymbol.createSimple({'size': '4', 'color': '255,0,0', 'outline_style': 'no'}) layer2.setSubSymbol(marker) symbol.appendSymbolLayer(layer2) sub_renderer = QgsSingleSymbolRenderer(symbol) source.setRenderer(QgsMergedFeatureRenderer(sub_renderer)) self.assertTrue(self.imageCheck('lines_single_subrenderer', 'lines_single_subrenderer', map_settings)) def testLinesCategorized(self): source = QgsVectorLayer(os.path.join(TEST_DATA_DIR, 'lines_touching.shp')) self.assertTrue(source.isValid()) 
map_settings = QgsMapSettings() map_settings.setExtent(source.extent().buffered(2)) map_settings.setDestinationCrs(source.crs()) map_settings.setLayers([source]) layer = QgsSimpleLineSymbolLayer() layer.setColor(QColor(0, 0, 0)) layer.setWidth(1) symbol1 = QgsLineSymbol() symbol1.changeSymbolLayer(0, layer.clone()) layer2 = QgsMarkerLineSymbolLayer() layer2.setPlacement(QgsTemplatedLineSymbolLayerBase.FirstVertex) marker = QgsMarkerSymbol.createSimple({'size': '4', 'color': '255,0,0', 'outline_style': 'no'}) layer2.setSubSymbol(marker) symbol1.appendSymbolLayer(layer2) symbol2 = QgsLineSymbol() symbol2.changeSymbolLayer(0, layer.clone()) layer2 = QgsMarkerLineSymbolLayer() layer2.setPlacement(QgsTemplatedLineSymbolLayerBase.FirstVertex) marker = QgsMarkerSymbol.createSimple({'size': '4', 'color': '0,255,0', 'outline_style': 'no'}) layer2.setSubSymbol(marker) symbol2.appendSymbolLayer(layer2) sub_renderer = QgsCategorizedSymbolRenderer('cat', [QgsRendererCategory('cat1', symbol1, 'cat1'), QgsRendererCategory('cat2', symbol2, 'cat2') ]) source.setRenderer(QgsMergedFeatureRenderer(sub_renderer)) self.assertTrue(self.imageCheck('lines_categorized_subrenderer', 'lines_categorized_subrenderer', map_settings)) def imageCheck(self, name, reference_image, map_settings): map_settings.setOutputDpi(96) self.report += "<h2>Render {}</h2>\n".format(name) checker = QgsRenderChecker() checker.setControlPathPrefix("mergedfeaturerenderer") checker.setControlName("expected_" + reference_image) checker.setMapSettings(map_settings) checker.setColorTolerance(2) result = checker.runTest(name, 20) self.report += checker.report() print(self.report) return result if __name__ == '__main__': unittest.main()
gpl-2.0
stevenaubertin/showsServer
lib/werkzeug/testsuite/http.py
145
18911
# -*- coding: utf-8 -*- """ werkzeug.testsuite.http ~~~~~~~~~~~~~~~~~~~~~~~ HTTP parsing utilities. :copyright: (c) 2014 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import unittest from datetime import datetime from werkzeug.testsuite import WerkzeugTestCase from werkzeug._compat import itervalues, wsgi_encoding_dance from werkzeug import http, datastructures from werkzeug.test import create_environ class HTTPUtilityTestCase(WerkzeugTestCase): def test_accept(self): a = http.parse_accept_header('en-us,ru;q=0.5') self.assert_equal(list(itervalues(a)), ['en-us', 'ru']) self.assert_equal(a.best, 'en-us') self.assert_equal(a.find('ru'), 1) self.assert_raises(ValueError, a.index, 'de') self.assert_equal(a.to_header(), 'en-us,ru;q=0.5') def test_mime_accept(self): a = http.parse_accept_header('text/xml,application/xml,' 'application/xhtml+xml,' 'text/html;q=0.9,text/plain;q=0.8,' 'image/png,*/*;q=0.5', datastructures.MIMEAccept) self.assert_raises(ValueError, lambda: a['missing']) self.assert_equal(a['image/png'], 1) self.assert_equal(a['text/plain'], 0.8) self.assert_equal(a['foo/bar'], 0.5) self.assert_equal(a[a.find('foo/bar')], ('*/*', 0.5)) def test_accept_matches(self): a = http.parse_accept_header('text/xml,application/xml,application/xhtml+xml,' 'text/html;q=0.9,text/plain;q=0.8,' 'image/png', datastructures.MIMEAccept) self.assert_equal(a.best_match(['text/html', 'application/xhtml+xml']), 'application/xhtml+xml') self.assert_equal(a.best_match(['text/html']), 'text/html') self.assert_true(a.best_match(['foo/bar']) is None) self.assert_equal(a.best_match(['foo/bar', 'bar/foo'], default='foo/bar'), 'foo/bar') self.assert_equal(a.best_match(['application/xml', 'text/xml']), 'application/xml') def test_charset_accept(self): a = http.parse_accept_header('ISO-8859-1,utf-8;q=0.7,*;q=0.7', datastructures.CharsetAccept) self.assert_equal(a['iso-8859-1'], a['iso8859-1']) self.assert_equal(a['iso-8859-1'], 1) self.assert_equal(a['UTF8'], 0.7) 
self.assert_equal(a['ebcdic'], 0.7) def test_language_accept(self): a = http.parse_accept_header('de-AT,de;q=0.8,en;q=0.5', datastructures.LanguageAccept) self.assert_equal(a.best, 'de-AT') self.assert_true('de_AT' in a) self.assert_true('en' in a) self.assert_equal(a['de-at'], 1) self.assert_equal(a['en'], 0.5) def test_set_header(self): hs = http.parse_set_header('foo, Bar, "Blah baz", Hehe') self.assert_true('blah baz' in hs) self.assert_true('foobar' not in hs) self.assert_true('foo' in hs) self.assert_equal(list(hs), ['foo', 'Bar', 'Blah baz', 'Hehe']) hs.add('Foo') self.assert_equal(hs.to_header(), 'foo, Bar, "Blah baz", Hehe') def test_list_header(self): hl = http.parse_list_header('foo baz, blah') self.assert_equal(hl, ['foo baz', 'blah']) def test_dict_header(self): d = http.parse_dict_header('foo="bar baz", blah=42') self.assert_equal(d, {'foo': 'bar baz', 'blah': '42'}) def test_cache_control_header(self): cc = http.parse_cache_control_header('max-age=0, no-cache') assert cc.max_age == 0 assert cc.no_cache cc = http.parse_cache_control_header('private, community="UCI"', None, datastructures.ResponseCacheControl) assert cc.private assert cc['community'] == 'UCI' c = datastructures.ResponseCacheControl() assert c.no_cache is None assert c.private is None c.no_cache = True assert c.no_cache == '*' c.private = True assert c.private == '*' del c.private assert c.private is None assert c.to_header() == 'no-cache' def test_authorization_header(self): a = http.parse_authorization_header('Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==') assert a.type == 'basic' assert a.username == 'Aladdin' assert a.password == 'open sesame' a = http.parse_authorization_header('''Digest username="Mufasa", realm="testrealm@host.invalid", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", uri="/dir/index.html", qop=auth, nc=00000001, cnonce="0a4f113b", response="6629fae49393a05397450978507c4ef1", opaque="5ccc069c403ebaf9f0171e9517f40e41"''') assert a.type == 'digest' assert a.username == 'Mufasa' 
assert a.realm == 'testrealm@host.invalid' assert a.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093' assert a.uri == '/dir/index.html' assert 'auth' in a.qop assert a.nc == '00000001' assert a.cnonce == '0a4f113b' assert a.response == '6629fae49393a05397450978507c4ef1' assert a.opaque == '5ccc069c403ebaf9f0171e9517f40e41' a = http.parse_authorization_header('''Digest username="Mufasa", realm="testrealm@host.invalid", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", uri="/dir/index.html", response="e257afa1414a3340d93d30955171dd0e", opaque="5ccc069c403ebaf9f0171e9517f40e41"''') assert a.type == 'digest' assert a.username == 'Mufasa' assert a.realm == 'testrealm@host.invalid' assert a.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093' assert a.uri == '/dir/index.html' assert a.response == 'e257afa1414a3340d93d30955171dd0e' assert a.opaque == '5ccc069c403ebaf9f0171e9517f40e41' assert http.parse_authorization_header('') is None assert http.parse_authorization_header(None) is None assert http.parse_authorization_header('foo') is None def test_www_authenticate_header(self): wa = http.parse_www_authenticate_header('Basic realm="WallyWorld"') assert wa.type == 'basic' assert wa.realm == 'WallyWorld' wa.realm = 'Foo Bar' assert wa.to_header() == 'Basic realm="Foo Bar"' wa = http.parse_www_authenticate_header('''Digest realm="testrealm@host.com", qop="auth,auth-int", nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", opaque="5ccc069c403ebaf9f0171e9517f40e41"''') assert wa.type == 'digest' assert wa.realm == 'testrealm@host.com' assert 'auth' in wa.qop assert 'auth-int' in wa.qop assert wa.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093' assert wa.opaque == '5ccc069c403ebaf9f0171e9517f40e41' wa = http.parse_www_authenticate_header('broken') assert wa.type == 'broken' assert not http.parse_www_authenticate_header('').type assert not http.parse_www_authenticate_header('') def test_etags(self): assert http.quote_etag('foo') == '"foo"' assert http.quote_etag('foo', True) == 'w/"foo"' assert 
http.unquote_etag('"foo"') == ('foo', False) assert http.unquote_etag('w/"foo"') == ('foo', True) es = http.parse_etags('"foo", "bar", w/"baz", blar') assert sorted(es) == ['bar', 'blar', 'foo'] assert 'foo' in es assert 'baz' not in es assert es.contains_weak('baz') assert 'blar' in es assert es.contains_raw('w/"baz"') assert es.contains_raw('"foo"') assert sorted(es.to_header().split(', ')) == ['"bar"', '"blar"', '"foo"', 'w/"baz"'] def test_etags_nonzero(self): etags = http.parse_etags('w/"foo"') self.assert_true(bool(etags)) self.assert_true(etags.contains_raw('w/"foo"')) def test_parse_date(self): assert http.parse_date('Sun, 06 Nov 1994 08:49:37 GMT ') == datetime(1994, 11, 6, 8, 49, 37) assert http.parse_date('Sunday, 06-Nov-94 08:49:37 GMT') == datetime(1994, 11, 6, 8, 49, 37) assert http.parse_date(' Sun Nov 6 08:49:37 1994') == datetime(1994, 11, 6, 8, 49, 37) assert http.parse_date('foo') is None def test_parse_date_overflows(self): assert http.parse_date(' Sun 02 Feb 1343 08:49:37 GMT') == datetime(1343, 2, 2, 8, 49, 37) assert http.parse_date('Thu, 01 Jan 1970 00:00:00 GMT') == datetime(1970, 1, 1, 0, 0) assert http.parse_date('Thu, 33 Jan 1970 00:00:00 GMT') is None def test_remove_entity_headers(self): now = http.http_date() headers1 = [('Date', now), ('Content-Type', 'text/html'), ('Content-Length', '0')] headers2 = datastructures.Headers(headers1) http.remove_entity_headers(headers1) assert headers1 == [('Date', now)] http.remove_entity_headers(headers2) self.assert_equal(headers2, datastructures.Headers([(u'Date', now)])) def test_remove_hop_by_hop_headers(self): headers1 = [('Connection', 'closed'), ('Foo', 'bar'), ('Keep-Alive', 'wtf')] headers2 = datastructures.Headers(headers1) http.remove_hop_by_hop_headers(headers1) assert headers1 == [('Foo', 'bar')] http.remove_hop_by_hop_headers(headers2) assert headers2 == datastructures.Headers([('Foo', 'bar')]) def test_parse_options_header(self): assert http.parse_options_header(r'something; 
foo="other\"thing"') == \ ('something', {'foo': 'other"thing'}) assert http.parse_options_header(r'something; foo="other\"thing"; meh=42') == \ ('something', {'foo': 'other"thing', 'meh': '42'}) assert http.parse_options_header(r'something; foo="other\"thing"; meh=42; bleh') == \ ('something', {'foo': 'other"thing', 'meh': '42', 'bleh': None}) assert http.parse_options_header('something; foo="other;thing"; meh=42; bleh') == \ ('something', {'foo': 'other;thing', 'meh': '42', 'bleh': None}) assert http.parse_options_header('something; foo="otherthing"; meh=; bleh') == \ ('something', {'foo': 'otherthing', 'meh': None, 'bleh': None}) def test_dump_options_header(self): assert http.dump_options_header('foo', {'bar': 42}) == \ 'foo; bar=42' assert http.dump_options_header('foo', {'bar': 42, 'fizz': None}) in \ ('foo; bar=42; fizz', 'foo; fizz; bar=42') def test_dump_header(self): assert http.dump_header([1, 2, 3]) == '1, 2, 3' assert http.dump_header([1, 2, 3], allow_token=False) == '"1", "2", "3"' assert http.dump_header({'foo': 'bar'}, allow_token=False) == 'foo="bar"' assert http.dump_header({'foo': 'bar'}) == 'foo=bar' def test_is_resource_modified(self): env = create_environ() # ignore POST env['REQUEST_METHOD'] = 'POST' assert not http.is_resource_modified(env, etag='testing') env['REQUEST_METHOD'] = 'GET' # etagify from data self.assert_raises(TypeError, http.is_resource_modified, env, data='42', etag='23') env['HTTP_IF_NONE_MATCH'] = http.generate_etag(b'awesome') assert not http.is_resource_modified(env, data=b'awesome') env['HTTP_IF_MODIFIED_SINCE'] = http.http_date(datetime(2008, 1, 1, 12, 30)) assert not http.is_resource_modified(env, last_modified=datetime(2008, 1, 1, 12, 00)) assert http.is_resource_modified(env, last_modified=datetime(2008, 1, 1, 13, 00)) def test_date_formatting(self): assert http.cookie_date(0) == 'Thu, 01-Jan-1970 00:00:00 GMT' assert http.cookie_date(datetime(1970, 1, 1)) == 'Thu, 01-Jan-1970 00:00:00 GMT' assert http.http_date(0) == 
'Thu, 01 Jan 1970 00:00:00 GMT' assert http.http_date(datetime(1970, 1, 1)) == 'Thu, 01 Jan 1970 00:00:00 GMT' def test_cookies(self): self.assert_strict_equal( dict(http.parse_cookie('dismiss-top=6; CP=null*; PHPSESSID=0a539d42abc001cd' 'c762809248d4beed; a=42; b="\\\";"')), { 'CP': u'null*', 'PHPSESSID': u'0a539d42abc001cdc762809248d4beed', 'a': u'42', 'dismiss-top': u'6', 'b': u'\";' } ) self.assert_strict_equal( set(http.dump_cookie('foo', 'bar baz blub', 360, httponly=True, sync_expires=False).split(u'; ')), set([u'HttpOnly', u'Max-Age=360', u'Path=/', u'foo="bar baz blub"']) ) self.assert_strict_equal(dict(http.parse_cookie('fo234{=bar; blub=Blah')), {'fo234{': u'bar', 'blub': u'Blah'}) def test_cookie_quoting(self): val = http.dump_cookie("foo", "?foo") self.assert_strict_equal(val, 'foo="?foo"; Path=/') self.assert_strict_equal(dict(http.parse_cookie(val)), {'foo': u'?foo'}) self.assert_strict_equal(dict(http.parse_cookie(r'foo="foo\054bar"')), {'foo': u'foo,bar'}) def test_cookie_domain_resolving(self): val = http.dump_cookie('foo', 'bar', domain=u'\N{SNOWMAN}.com') self.assert_strict_equal(val, 'foo=bar; Domain=xn--n3h.com; Path=/') def test_cookie_unicode_dumping(self): val = http.dump_cookie('foo', u'\N{SNOWMAN}') h = datastructures.Headers() h.add('Set-Cookie', val) self.assert_equal(h['Set-Cookie'], 'foo="\\342\\230\\203"; Path=/') cookies = http.parse_cookie(h['Set-Cookie']) self.assert_equal(cookies['foo'], u'\N{SNOWMAN}') def test_cookie_unicode_keys(self): # Yes, this is technically against the spec but happens val = http.dump_cookie(u'fö', u'fö') self.assert_equal(val, wsgi_encoding_dance(u'fö="f\\303\\266"; Path=/', 'utf-8')) cookies = http.parse_cookie(val) self.assert_equal(cookies[u'fö'], u'fö') def test_cookie_unicode_parsing(self): # This is actually a correct test. This is what is being submitted # by firefox if you set an unicode cookie and we get the cookie sent # in on Python 3 under PEP 3333. 
cookies = http.parse_cookie(u'fö=fö') self.assert_equal(cookies[u'fö'], u'fö') def test_cookie_domain_encoding(self): val = http.dump_cookie('foo', 'bar', domain=u'\N{SNOWMAN}.com') self.assert_strict_equal(val, 'foo=bar; Domain=xn--n3h.com; Path=/') val = http.dump_cookie('foo', 'bar', domain=u'.\N{SNOWMAN}.com') self.assert_strict_equal(val, 'foo=bar; Domain=.xn--n3h.com; Path=/') val = http.dump_cookie('foo', 'bar', domain=u'.foo.com') self.assert_strict_equal(val, 'foo=bar; Domain=.foo.com; Path=/') class RangeTestCase(WerkzeugTestCase): def test_if_range_parsing(self): rv = http.parse_if_range_header('"Test"') assert rv.etag == 'Test' assert rv.date is None assert rv.to_header() == '"Test"' # weak information is dropped rv = http.parse_if_range_header('w/"Test"') assert rv.etag == 'Test' assert rv.date is None assert rv.to_header() == '"Test"' # broken etags are supported too rv = http.parse_if_range_header('bullshit') assert rv.etag == 'bullshit' assert rv.date is None assert rv.to_header() == '"bullshit"' rv = http.parse_if_range_header('Thu, 01 Jan 1970 00:00:00 GMT') assert rv.etag is None assert rv.date == datetime(1970, 1, 1) assert rv.to_header() == 'Thu, 01 Jan 1970 00:00:00 GMT' for x in '', None: rv = http.parse_if_range_header(x) assert rv.etag is None assert rv.date is None assert rv.to_header() == '' def test_range_parsing(): rv = http.parse_range_header('bytes=52') assert rv is None rv = http.parse_range_header('bytes=52-') assert rv.units == 'bytes' assert rv.ranges == [(52, None)] assert rv.to_header() == 'bytes=52-' rv = http.parse_range_header('bytes=52-99') assert rv.units == 'bytes' assert rv.ranges == [(52, 100)] assert rv.to_header() == 'bytes=52-99' rv = http.parse_range_header('bytes=52-99,-1000') assert rv.units == 'bytes' assert rv.ranges == [(52, 100), (-1000, None)] assert rv.to_header() == 'bytes=52-99,-1000' rv = http.parse_range_header('bytes = 1 - 100') assert rv.units == 'bytes' assert rv.ranges == [(1, 101)] assert 
rv.to_header() == 'bytes=1-100' rv = http.parse_range_header('AWesomes=0-999') assert rv.units == 'awesomes' assert rv.ranges == [(0, 1000)] assert rv.to_header() == 'awesomes=0-999' def test_content_range_parsing(): rv = http.parse_content_range_header('bytes 0-98/*') assert rv.units == 'bytes' assert rv.start == 0 assert rv.stop == 99 assert rv.length is None assert rv.to_header() == 'bytes 0-98/*' rv = http.parse_content_range_header('bytes 0-98/*asdfsa') assert rv is None rv = http.parse_content_range_header('bytes 0-99/100') assert rv.to_header() == 'bytes 0-99/100' rv.start = None rv.stop = None assert rv.units == 'bytes' assert rv.to_header() == 'bytes */100' rv = http.parse_content_range_header('bytes */100') assert rv.start is None assert rv.stop is None assert rv.length == 100 assert rv.units == 'bytes' class RegressionTestCase(WerkzeugTestCase): def test_best_match_works(self): # was a bug in 0.6 rv = http.parse_accept_header('foo=,application/xml,application/xhtml+xml,' 'text/html;q=0.9,text/plain;q=0.8,' 'image/png,*/*;q=0.5', datastructures.MIMEAccept).best_match(['foo/bar']) self.assert_equal(rv, 'foo/bar') def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(HTTPUtilityTestCase)) suite.addTest(unittest.makeSuite(RegressionTestCase)) return suite
apache-2.0
jlspyaozhongkai/Uter
third_party_backup/Python-2.7.9/Lib/nntplib.py
92
21470
"""An NNTP client class based on RFC 977: Network News Transfer Protocol. Example: >>> from nntplib import NNTP >>> s = NNTP('news') >>> resp, count, first, last, name = s.group('comp.lang.python') >>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last Group comp.lang.python has 51 articles, range 5770 to 5821 >>> resp, subs = s.xhdr('subject', first + '-' + last) >>> resp = s.quit() >>> Here 'resp' is the server response line. Error responses are turned into exceptions. To post an article from a file: >>> f = open(filename, 'r') # file containing article, including header >>> resp = s.post(f) >>> For descriptions of all methods, read the comments in the code below. Note that all arguments and return values representing article numbers are strings, not numbers, since they are rarely used for calculations. """ # RFC 977 by Brian Kantor and Phil Lapsley. # xover, xgtitle, xpath, date methods by Kevan Heydon # Imports import re import socket __all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError", "NNTPPermanentError","NNTPProtocolError","NNTPDataError", "error_reply","error_temp","error_perm","error_proto", "error_data",] # maximal line length when calling readline(). This is to prevent # reading arbitrary length lines. RFC 3977 limits NNTP line length to # 512 characters, including CRLF. We have selected 2048 just to be on # the safe side. 
_MAXLINE = 2048 # Exceptions raised when an error or invalid response is received class NNTPError(Exception): """Base class for all nntplib exceptions""" def __init__(self, *args): Exception.__init__(self, *args) try: self.response = args[0] except IndexError: self.response = 'No response given' class NNTPReplyError(NNTPError): """Unexpected [123]xx reply""" pass class NNTPTemporaryError(NNTPError): """4xx errors""" pass class NNTPPermanentError(NNTPError): """5xx errors""" pass class NNTPProtocolError(NNTPError): """Response does not begin with [1-5]""" pass class NNTPDataError(NNTPError): """Error in response data""" pass # for backwards compatibility error_reply = NNTPReplyError error_temp = NNTPTemporaryError error_perm = NNTPPermanentError error_proto = NNTPProtocolError error_data = NNTPDataError # Standard port used by NNTP servers NNTP_PORT = 119 # Response numbers that are followed by additional text (e.g. article) LONGRESP = ['100', '215', '220', '221', '222', '224', '230', '231', '282'] # Line terminators (we always output CRLF, but accept any of CRLF, CR, LF) CRLF = '\r\n' # The class itself class NNTP: def __init__(self, host, port=NNTP_PORT, user=None, password=None, readermode=None, usenetrc=True): """Initialize an instance. Arguments: - host: hostname to connect to - port: port to connect to (default the standard NNTP port) - user: username to authenticate with - password: password to use with username - readermode: if true, send 'mode reader' command after connecting. readermode is sometimes necessary if you are connecting to an NNTP server on the local machine and intend to call reader-specific commands, such as `group'. If you get unexpected NNTPPermanentErrors, you might need to set readermode. """ self.host = host self.port = port self.sock = socket.create_connection((host, port)) self.file = self.sock.makefile('rb') self.debugging = 0 self.welcome = self.getresp() # 'mode reader' is sometimes necessary to enable 'reader' mode. 
# However, the order in which 'mode reader' and 'authinfo' need to # arrive differs between some NNTP servers. Try to send # 'mode reader', and if it fails with an authorization failed # error, try again after sending authinfo. readermode_afterauth = 0 if readermode: try: self.welcome = self.shortcmd('mode reader') except NNTPPermanentError: # error 500, probably 'not implemented' pass except NNTPTemporaryError, e: if user and e.response[:3] == '480': # Need authorization before 'mode reader' readermode_afterauth = 1 else: raise # If no login/password was specified, try to get them from ~/.netrc # Presume that if .netc has an entry, NNRP authentication is required. try: if usenetrc and not user: import netrc credentials = netrc.netrc() auth = credentials.authenticators(host) if auth: user = auth[0] password = auth[2] except IOError: pass # Perform NNRP authentication if needed. if user: resp = self.shortcmd('authinfo user '+user) if resp[:3] == '381': if not password: raise NNTPReplyError(resp) else: resp = self.shortcmd( 'authinfo pass '+password) if resp[:3] != '281': raise NNTPPermanentError(resp) if readermode_afterauth: try: self.welcome = self.shortcmd('mode reader') except NNTPPermanentError: # error 500, probably 'not implemented' pass # Get the welcome message from the server # (this is read and squirreled away by __init__()). # If the response code is 200, posting is allowed; # if it 201, posting is not allowed def getwelcome(self): """Get the welcome message from the server (this is read and squirreled away by __init__()). If the response code is 200, posting is allowed; if it 201, posting is not allowed.""" if self.debugging: print '*welcome*', repr(self.welcome) return self.welcome def set_debuglevel(self, level): """Set the debugging level. Argument 'level' means: 0: no debugging output (default) 1: print commands and responses but not body text etc. 
2: also print raw lines read and sent before stripping CR/LF""" self.debugging = level debug = set_debuglevel def putline(self, line): """Internal: send one line to the server, appending CRLF.""" line = line + CRLF if self.debugging > 1: print '*put*', repr(line) self.sock.sendall(line) def putcmd(self, line): """Internal: send one command to the server (through putline()).""" if self.debugging: print '*cmd*', repr(line) self.putline(line) def getline(self): """Internal: return one line from the server, stripping CRLF. Raise EOFError if the connection is closed.""" line = self.file.readline(_MAXLINE + 1) if len(line) > _MAXLINE: raise NNTPDataError('line too long') if self.debugging > 1: print '*get*', repr(line) if not line: raise EOFError if line[-2:] == CRLF: line = line[:-2] elif line[-1:] in CRLF: line = line[:-1] return line def getresp(self): """Internal: get a response from the server. Raise various errors if the response indicates an error.""" resp = self.getline() if self.debugging: print '*resp*', repr(resp) c = resp[:1] if c == '4': raise NNTPTemporaryError(resp) if c == '5': raise NNTPPermanentError(resp) if c not in '123': raise NNTPProtocolError(resp) return resp def getlongresp(self, file=None): """Internal: get a response plus following text from the server. 
Raise various errors if the response indicates an error.""" openedFile = None try: # If a string was passed then open a file with that name if isinstance(file, str): openedFile = file = open(file, "w") resp = self.getresp() if resp[:3] not in LONGRESP: raise NNTPReplyError(resp) list = [] while 1: line = self.getline() if line == '.': break if line[:2] == '..': line = line[1:] if file: file.write(line + "\n") else: list.append(line) finally: # If this method created the file, then it must close it if openedFile: openedFile.close() return resp, list def shortcmd(self, line): """Internal: send a command and get the response.""" self.putcmd(line) return self.getresp() def longcmd(self, line, file=None): """Internal: send a command and get the response plus following text.""" self.putcmd(line) return self.getlongresp(file) def newgroups(self, date, time, file=None): """Process a NEWGROUPS command. Arguments: - date: string 'yymmdd' indicating the date - time: string 'hhmmss' indicating the time Return: - resp: server response if successful - list: list of newsgroup names""" return self.longcmd('NEWGROUPS ' + date + ' ' + time, file) def newnews(self, group, date, time, file=None): """Process a NEWNEWS command. Arguments: - group: group name or '*' - date: string 'yymmdd' indicating the date - time: string 'hhmmss' indicating the time Return: - resp: server response if successful - list: list of message ids""" cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time return self.longcmd(cmd, file) def list(self, file=None): """Process a LIST command. Return: - resp: server response if successful - list: list of (group, last, first, flag) (strings)""" resp, list = self.longcmd('LIST', file) for i in range(len(list)): # Parse lines into "group last first flag" list[i] = tuple(list[i].split()) return resp, list def description(self, group): """Get a description for a single group. If more than one group matches ('group' is a pattern), return the first. 
If no group matches, return an empty string. This elides the response code from the server, since it can only be '215' or '285' (for xgtitle) anyway. If the response code is needed, use the 'descriptions' method. NOTE: This neither checks for a wildcard in 'group' nor does it check whether the group actually exists.""" resp, lines = self.descriptions(group) if len(lines) == 0: return "" else: return lines[0][1] def descriptions(self, group_pattern): """Get descriptions for a range of groups.""" line_pat = re.compile("^(?P<group>[^ \t]+)[ \t]+(.*)$") # Try the more std (acc. to RFC2980) LIST NEWSGROUPS first resp, raw_lines = self.longcmd('LIST NEWSGROUPS ' + group_pattern) if resp[:3] != "215": # Now the deprecated XGTITLE. This either raises an error # or succeeds with the same output structure as LIST # NEWSGROUPS. resp, raw_lines = self.longcmd('XGTITLE ' + group_pattern) lines = [] for raw_line in raw_lines: match = line_pat.search(raw_line.strip()) if match: lines.append(match.group(1, 2)) return resp, lines def group(self, name): """Process a GROUP command. Argument: - group: the group name Returns: - resp: server response if successful - count: number of articles (string) - first: first article number (string) - last: last article number (string) - name: the group name""" resp = self.shortcmd('GROUP ' + name) if resp[:3] != '211': raise NNTPReplyError(resp) words = resp.split() count = first = last = 0 n = len(words) if n > 1: count = words[1] if n > 2: first = words[2] if n > 3: last = words[3] if n > 4: name = words[4].lower() return resp, count, first, last, name def help(self, file=None): """Process a HELP command. 
Returns: - resp: server response if successful - list: list of strings""" return self.longcmd('HELP',file) def statparse(self, resp): """Internal: parse the response of a STAT, NEXT or LAST command.""" if resp[:2] != '22': raise NNTPReplyError(resp) words = resp.split() nr = 0 id = '' n = len(words) if n > 1: nr = words[1] if n > 2: id = words[2] return resp, nr, id def statcmd(self, line): """Internal: process a STAT, NEXT or LAST command.""" resp = self.shortcmd(line) return self.statparse(resp) def stat(self, id): """Process a STAT command. Argument: - id: article number or message id Returns: - resp: server response if successful - nr: the article number - id: the message id""" return self.statcmd('STAT ' + id) def next(self): """Process a NEXT command. No arguments. Return as for STAT.""" return self.statcmd('NEXT') def last(self): """Process a LAST command. No arguments. Return as for STAT.""" return self.statcmd('LAST') def artcmd(self, line, file=None): """Internal: process a HEAD, BODY or ARTICLE command.""" resp, list = self.longcmd(line, file) resp, nr, id = self.statparse(resp) return resp, nr, id, list def head(self, id): """Process a HEAD command. Argument: - id: article number or message id Returns: - resp: server response if successful - nr: article number - id: message id - list: the lines of the article's header""" return self.artcmd('HEAD ' + id) def body(self, id, file=None): """Process a BODY command. Argument: - id: article number or message id - file: Filename string or file object to store the article in Returns: - resp: server response if successful - nr: article number - id: message id - list: the lines of the article's body or an empty list if file was used""" return self.artcmd('BODY ' + id, file) def article(self, id): """Process an ARTICLE command. 
Argument: - id: article number or message id Returns: - resp: server response if successful - nr: article number - id: message id - list: the lines of the article""" return self.artcmd('ARTICLE ' + id) def slave(self): """Process a SLAVE command. Returns: - resp: server response if successful""" return self.shortcmd('SLAVE') def xhdr(self, hdr, str, file=None): """Process an XHDR command (optional server extension). Arguments: - hdr: the header type (e.g. 'subject') - str: an article nr, a message id, or a range nr1-nr2 Returns: - resp: server response if successful - list: list of (nr, value) strings""" pat = re.compile('^([0-9]+) ?(.*)\n?') resp, lines = self.longcmd('XHDR ' + hdr + ' ' + str, file) for i in range(len(lines)): line = lines[i] m = pat.match(line) if m: lines[i] = m.group(1, 2) return resp, lines def xover(self, start, end, file=None): """Process an XOVER command (optional server extension) Arguments: - start: start of range - end: end of range Returns: - resp: server response if successful - list: list of (art-nr, subject, poster, date, id, references, size, lines)""" resp, lines = self.longcmd('XOVER ' + start + '-' + end, file) xover_lines = [] for line in lines: elem = line.split("\t") try: xover_lines.append((elem[0], elem[1], elem[2], elem[3], elem[4], elem[5].split(), elem[6], elem[7])) except IndexError: raise NNTPDataError(line) return resp,xover_lines def xgtitle(self, group, file=None): """Process an XGTITLE command (optional server extension) Arguments: - group: group name wildcard (i.e. 
news.*) Returns: - resp: server response if successful - list: list of (name,title) strings""" line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$") resp, raw_lines = self.longcmd('XGTITLE ' + group, file) lines = [] for raw_line in raw_lines: match = line_pat.search(raw_line.strip()) if match: lines.append(match.group(1, 2)) return resp, lines def xpath(self,id): """Process an XPATH command (optional server extension) Arguments: - id: Message id of article Returns: resp: server response if successful path: directory path to article""" resp = self.shortcmd("XPATH " + id) if resp[:3] != '223': raise NNTPReplyError(resp) try: [resp_num, path] = resp.split() except ValueError: raise NNTPReplyError(resp) else: return resp, path def date (self): """Process the DATE command. Arguments: None Returns: resp: server response if successful date: Date suitable for newnews/newgroups commands etc. time: Time suitable for newnews/newgroups commands etc.""" resp = self.shortcmd("DATE") if resp[:3] != '111': raise NNTPReplyError(resp) elem = resp.split() if len(elem) != 2: raise NNTPDataError(resp) date = elem[1][2:8] time = elem[1][-6:] if len(date) != 6 or len(time) != 6: raise NNTPDataError(resp) return resp, date, time def post(self, f): """Process a POST command. Arguments: - f: file containing the article Returns: - resp: server response if successful""" resp = self.shortcmd('POST') # Raises error_??? if posting is not allowed if resp[0] != '3': raise NNTPReplyError(resp) while 1: line = f.readline() if not line: break if line[-1] == '\n': line = line[:-1] if line[:1] == '.': line = '.' + line self.putline(line) self.putline('.') return self.getresp() def ihave(self, id, f): """Process an IHAVE command. Arguments: - id: message-id of the article - f: file containing the article Returns: - resp: server response if successful Note that if the server refuses the article an exception is raised.""" resp = self.shortcmd('IHAVE ' + id) # Raises error_??? 
if the server already has it if resp[0] != '3': raise NNTPReplyError(resp) while 1: line = f.readline() if not line: break if line[-1] == '\n': line = line[:-1] if line[:1] == '.': line = '.' + line self.putline(line) self.putline('.') return self.getresp() def quit(self): """Process a QUIT command and close the socket. Returns: - resp: server response if successful""" resp = self.shortcmd('QUIT') self.file.close() self.sock.close() del self.file, self.sock return resp # Test retrieval when run as a script. # Assumption: if there's a local news server, it's called 'news'. # Assumption: if user queries a remote news server, it's named # in the environment variable NNTPSERVER (used by slrn and kin) # and we want readermode off. if __name__ == '__main__': import os newshost = 'news' and os.environ["NNTPSERVER"] if newshost.find('.') == -1: mode = 'readermode' else: mode = None s = NNTP(newshost, readermode=mode) resp, count, first, last, name = s.group('comp.lang.python') print resp print 'Group', name, 'has', count, 'articles, range', first, 'to', last resp, subs = s.xhdr('subject', first + '-' + last) print resp for item in subs: print "%7s %s" % item resp = s.quit() print resp
gpl-3.0
blighj/django
django/conf/locale/th/formats.py
44
1070
# This file is distributed under the same license as the Django package. # # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j F Y' TIME_FORMAT = 'G:i' DATETIME_FORMAT = 'j F Y, G:i' YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j F' SHORT_DATE_FORMAT = 'j M Y' SHORT_DATETIME_FORMAT = 'j M Y, G:i' FIRST_DAY_OF_WEEK = 0 # Sunday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = [ '%d/%m/%Y', # 25/10/2006 '%d %b %Y', # 25 ต.ค. 2006 '%d %B %Y', # 25 ตุลาคม 2006 ] TIME_INPUT_FORMATS = [ '%H:%M:%S.%f', # 14:30:59.000200 '%H:%M:%S', # 14:30:59 '%H:%M', # 14:30 ] DATETIME_INPUT_FORMATS = [ '%d/%m/%Y %H:%M:%S.%f', # 25/10/2006 14:30:59.000200 '%d/%m/%Y %H:%M:%S', # 25/10/2006 14:30:59 '%d/%m/%Y %H:%M', # 25/10/2006 14:30 ] DECIMAL_SEPARATOR = '.' THOUSAND_SEPARATOR = ',' NUMBER_GROUPING = 3
bsd-3-clause
jmbergmann/yogi
yogi-python/tests/test_subscription.py
1
3085
#!/usr/bin/env python3 import sys import os sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + '/..') import yogi import unittest from proto import yogi_0000d007_pb2 from test_terminals import AsyncCall class TestSubscription(unittest.TestCase): def setUp(self): self.scheduler = yogi.Scheduler() self.leafA = yogi.Leaf(self.scheduler) self.leafB = yogi.Leaf(self.scheduler) self.connection = None self.tmA = yogi.PublishSubscribeTerminal('T', yogi_0000d007_pb2, leaf=self.leafA) self.tmB = yogi.PublishSubscribeTerminal('T', yogi_0000d007_pb2, leaf=self.leafB) self.bd = yogi.Binding(self.tmA, self.tmB.name) def tearDown(self): self.bd.destroy() self.tmA.destroy() self.tmB.destroy() if self.connection: self.connection.destroy() self.leafA.destroy() self.leafB.destroy() self.scheduler.destroy() def connect(self): self.assertIs(yogi.SubscriptionState.UNSUBSCRIBED, self.tmB.get_subscription_state()) self.connection = yogi.LocalConnection(self.leafA, self.leafB) while self.tmB.get_subscription_state() is yogi.SubscriptionState.UNSUBSCRIBED: pass self.assertIs(yogi.SubscriptionState.SUBSCRIBED, self.tmB.get_subscription_state()) def test_ctor(self): pass def test_async_get_subscription_state(self): with AsyncCall() as wrap: def fn(res, state): self.assertEqual(yogi.Success(), res) self.assertEqual(yogi.SubscriptionState.UNSUBSCRIBED, state) self.tmB.async_get_subscription_state(wrap(fn)) self.connect() with AsyncCall() as wrap: def fn(res, state): self.assertEqual(yogi.Success(), res) self.assertEqual(yogi.SubscriptionState.SUBSCRIBED, state) self.tmB.async_get_subscription_state(wrap(fn)) def test_async_await_subscription_state_changed(self): with AsyncCall() as wrap: def fn(res, state): self.assertEqual(yogi.Success(), res) self.assertEqual(yogi.SubscriptionState.SUBSCRIBED, state) self.tmB.async_await_subscription_state_change(wrap(fn)) self.connect() with AsyncCall() as wrap: def fn(res, state): 
self.assertEqual(yogi.Success(), res) self.assertEqual(yogi.SubscriptionState.UNSUBSCRIBED, state) self.tmB.async_await_subscription_state_change(wrap(fn)) self.connection.destroy() self.connection = None def test_cancel_await_subscription_state_changed(self): with AsyncCall() as wrap: def fn(res, state): self.assertEqual(yogi.Canceled(), res) self.tmB.async_await_subscription_state_change(wrap(fn)) self.tmB.cancel_await_subscription_state_change() if __name__ == '__main__': unittest.main()
gpl-3.0
kannon92/psi4
doc/sphinxman/source/psi4doc/ext/psidomain.py
1
1221
# # @BEGIN LICENSE # # Psi4: an open-source quantum chemistry software package # # Copyright (c) 2007-2016 The Psi4 Developers. # # The copyrights for code used from other parties are included in # the corresponding files. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # @END LICENSE # """Extension to format and index PSI variables.""" #Sphinx.add_object_type(psivar, rolename, indextemplate='', parse_node=None, ref_nodeclass=None, objname='', doc_field_types=[]) def setup(app): app.add_object_type('psivar', 'psivar', indextemplate='single: %s')
gpl-2.0
chmberl/django-cms
cms/utils/__init__.py
40
3233
# -*- coding: utf-8 -*- # TODO: this is just stuff from utils.py - should be splitted / moved from django.conf import settings from django.core.files.storage import get_storage_class from django.utils.functional import LazyObject from cms import constants from cms.utils.conf import get_cms_setting from cms.utils.conf import get_site_id # nopyflakes from cms.utils.i18n import get_default_language from cms.utils.i18n import get_language_list from cms.utils.i18n import get_language_code def get_template_from_request(request, obj=None, no_current_page=False): """ Gets a valid template from different sources or falls back to the default template. """ template = None if len(get_cms_setting('TEMPLATES')) == 1: return get_cms_setting('TEMPLATES')[0][0] if hasattr(request, 'POST') and "template" in request.POST: template = request.POST['template'] elif hasattr(request, 'GET') and "template" in request.GET: template = request.GET['template'] if not template and obj is not None: template = obj.get_template() if not template and not no_current_page and hasattr(request, "current_page"): current_page = request.current_page if hasattr(current_page, "get_template"): template = current_page.get_template() if template is not None and template in dict(get_cms_setting('TEMPLATES')).keys(): if template == constants.TEMPLATE_INHERITANCE_MAGIC and obj: # Happens on admin's request when changing the template for a page # to "inherit". 
return obj.get_template() return template return get_cms_setting('TEMPLATES')[0][0] def get_language_from_request(request, current_page=None): """ Return the most obvious language according the request """ language = None if hasattr(request, 'POST'): language = request.POST.get('language', None) if hasattr(request, 'GET') and not language: language = request.GET.get('language', None) site_id = current_page.site_id if current_page else None if language: language = get_language_code(language) if not language in get_language_list(site_id): language = None if language is None: language = get_language_code(getattr(request, 'LANGUAGE_CODE', None)) if language: if not language in get_language_list(site_id): language = None if language is None and current_page: # in last resort, get the first language available in the page languages = current_page.get_languages() if len(languages) > 0: language = languages[0] if language is None: # language must be defined in CMS_LANGUAGES, so check first if there # is any language with LANGUAGE_CODE, otherwise try to split it and find # best match language = get_default_language(site_id=site_id) return language default_storage = 'django.contrib.staticfiles.storage.StaticFilesStorage' class ConfiguredStorage(LazyObject): def _setup(self): self._wrapped = get_storage_class(getattr(settings, 'STATICFILES_STORAGE', default_storage))() configured_storage = ConfiguredStorage()
bsd-3-clause
shsingh/ansible
lib/ansible/module_utils/facts/system/selinux.py
162
3207
# Collect facts related to selinux # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.module_utils.facts.collector import BaseFactCollector try: import selinux HAVE_SELINUX = True except ImportError: HAVE_SELINUX = False SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' } class SelinuxFactCollector(BaseFactCollector): name = 'selinux' _fact_ids = set() def collect(self, module=None, collected_facts=None): facts_dict = {} selinux_facts = {} # If selinux library is missing, only set the status and selinux_python_present since # there is no way to tell if SELinux is enabled or disabled on the system # without the library. 
if not HAVE_SELINUX: selinux_facts['status'] = 'Missing selinux Python library' facts_dict['selinux'] = selinux_facts facts_dict['selinux_python_present'] = False return facts_dict # Set a boolean for testing whether the Python library is present facts_dict['selinux_python_present'] = True if not selinux.is_selinux_enabled(): selinux_facts['status'] = 'disabled' else: selinux_facts['status'] = 'enabled' try: selinux_facts['policyvers'] = selinux.security_policyvers() except (AttributeError, OSError): selinux_facts['policyvers'] = 'unknown' try: (rc, configmode) = selinux.selinux_getenforcemode() if rc == 0: selinux_facts['config_mode'] = SELINUX_MODE_DICT.get(configmode, 'unknown') else: selinux_facts['config_mode'] = 'unknown' except (AttributeError, OSError): selinux_facts['config_mode'] = 'unknown' try: mode = selinux.security_getenforce() selinux_facts['mode'] = SELINUX_MODE_DICT.get(mode, 'unknown') except (AttributeError, OSError): selinux_facts['mode'] = 'unknown' try: (rc, policytype) = selinux.selinux_getpolicytype() if rc == 0: selinux_facts['type'] = policytype else: selinux_facts['type'] = 'unknown' except (AttributeError, OSError): selinux_facts['type'] = 'unknown' facts_dict['selinux'] = selinux_facts return facts_dict
gpl-3.0
roselleebarle04/opencog
opencog/python/blending/src/decider/base_decider.py
22
2632
from abc import ABCMeta, abstractmethod

from blending.util.blending_config import BlendConfig
from blending.util.blending_error import blending_status

__author__ = 'DongMin Kim'


class BaseDecider(object):
    """Abstract base class exposing the 'blending_decide()' entry point.

    The blender calls ``blending_decide()``, which delegates the actual
    work to ``blending_decide_impl()`` provided by a concrete subclass
    and then converts the subclass' status into success or an exception.

    Attributes:
        a: An instance of AtomSpace.
        last_status: The status left by the most recent call.
        ret: The decided atoms.

    :type a: opencog.atomspace.AtomSpace
    :type last_status: int
    :type ret: list[Atom]
    """

    __metaclass__ = ABCMeta

    def __init__(self, a):
        self.a = a
        self.last_status = blending_status.UNKNOWN_ERROR
        self.ret = None
        self.make_default_config()

    def make_default_config(self):
        """Register this class' default configuration values."""
        BlendConfig().update(self.a, "decide-result-atoms-count", "2")

    @abstractmethod
    def blending_decide_impl(self, chosen_atoms, config_base):
        """Hook for subclasses: perform the actual decision.

        Args:
            chosen_atoms: The atoms to decide.
            config_base: A Node to save custom config.
        :param chosen_atoms: list[Atom]
        :param config_base: Atom

        Raises:
            NotImplementedError: Someone tried to call the abstract method.
        """
        raise NotImplementedError("Please implement this method.")

    def blending_decide(self, chosen_atoms, config_base):
        """Run the subclass decider and translate its status.

        Args:
            chosen_atoms: The atoms to decide.
            config_base: A Node to save custom config.
        :param chosen_atoms: list[Atom]
        :param config_base: Atom

        Returns:
            The decided atom(s); an empty list means no atom satisfied
            the given condition.
        :rtype : list[Atom]

        Raises:
            UserWarning: An error occurred in deciding.
        """
        self.last_status = blending_status.IN_PROCESS

        self.blending_decide_impl(chosen_atoms, config_base)

        # Any status other than IN_PROCESS means the implementation
        # flagged a failure: clear the result and escalate.
        if self.last_status != blending_status.IN_PROCESS:
            self.ret = []
            raise UserWarning('ERROR_IN_BLENDING_DECIDER')

        self.last_status = blending_status.SUCCESS
        return self.ret
agpl-3.0
GauravSahu/odoo
addons/stock_account/stock_account.py
77
19901
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID, api
import logging
_logger = logging.getLogger(__name__)


class stock_inventory(osv.osv):
    _inherit = "stock.inventory"
    _columns = {
        'period_id': fields.many2one('account.period', 'Force Valuation Period', help="Choose the accounting period where you want to value the stock moves created by the inventory instead of the default one (chosen by the inventory end date)"),
    }

    def post_inventory(self, cr, uid, inv, context=None):
        """Post the inventory, forcing valuation into ``period_id`` if one was chosen."""
        if context is None:
            context = {}
        ctx = context.copy()
        if inv.period_id:
            ctx['force_period'] = inv.period_id.id
        return super(stock_inventory, self).post_inventory(cr, uid, inv, context=ctx)


#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
    _inherit = "stock.location"

    _columns = {
        'valuation_in_account_id': fields.many2one('account.account', 'Stock Valuation Account (Incoming)', domain=[('type', '=', 'other')],
                                                   help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
                                                        "this account will be used to hold the value of products being moved from an internal location "
                                                        "into this location, instead of the generic Stock Output Account set on the product. "
                                                        "This has no effect for internal locations."),
        'valuation_out_account_id': fields.many2one('account.account', 'Stock Valuation Account (Outgoing)', domain=[('type', '=', 'other')],
                                                    help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
                                                         "this account will be used to hold the value of products being moved out of this location "
                                                         "and into an internal location, instead of the generic Stock Output Account set on the product. "
                                                         "This has no effect for internal locations."),
    }


#----------------------------------------------------------
# Quants
#----------------------------------------------------------
class stock_quant(osv.osv):
    _inherit = "stock.quant"

    def _get_inventory_value(self, cr, uid, quant, context=None):
        # BUGFIX: the original tested ``cost_method in ('real')``.  A
        # parenthesized string is not a tuple, so that was a *substring*
        # test against 'real' (e.g. 'rea' would also match), not a
        # membership test.  Use an explicit equality comparison instead.
        if quant.product_id.cost_method == 'real':
            return quant.cost * quant.qty
        return super(stock_quant, self)._get_inventory_value(cr, uid, quant, context=context)

    @api.cr_uid_ids_context
    def _price_update(self, cr, uid, quant_ids, newprice, context=None):
        ''' This function is called at the end of negative quant reconciliation
            and does the accounting entries adjustemnts and the update of the
            product cost price if needed '''
        if context is None:
            context = {}
        account_period = self.pool['account.period']
        super(stock_quant, self)._price_update(cr, uid, quant_ids, newprice, context=context)
        for quant in self.browse(cr, uid, quant_ids, context=context):
            move = self._get_latest_move(cr, uid, quant, context=context)
            valuation_update = newprice - quant.cost
            # this is where we post accounting entries for adjustment, if needed
            if not quant.company_id.currency_id.is_zero(valuation_update):
                # adjustment journal entry needed, cost has been updated
                period_id = (context.get('force_period') or
                             account_period.find(cr, uid, move.date, context=context)[0])
                period = account_period.browse(cr, uid, period_id, context=context)
                # If neg quant period already closed (likely with manual valuation), skip update
                if period.state != 'done':
                    ctx = dict(context, force_valuation_amount=valuation_update)
                    self._account_entry_move(cr, uid, [quant], move, context=ctx)

            # update the standard price of the product, only if we would have done it if we'd have had enough stock at first, which means
            # 1) the product cost's method is 'real'
            # 2) we just fixed a negative quant caused by an outgoing shipment
            if quant.product_id.cost_method == 'real' and quant.location_id.usage != 'internal':
                self.pool.get('stock.move')._store_average_cost_price(cr, uid, move, context=context)

    def _account_entry_move(self, cr, uid, quants, move, context=None):
        """ Accounting Valuation Entries

        quants: browse record list of Quants to create accounting valuation entries for. Unempty and all quants are supposed to have the same location id (thay already moved in)
        move: Move to use. browse record
        """
        if context is None:
            context = {}
        location_obj = self.pool.get('stock.location')
        location_from = move.location_id
        location_to = quants[0].location_id
        company_from = location_obj._location_owner(cr, uid, location_from, context=context)
        company_to = location_obj._location_owner(cr, uid, location_to, context=context)

        if move.product_id.valuation != 'real_time':
            return False
        for q in quants:
            if q.owner_id:
                # if the quant isn't owned by the company, we don't make any valuation entry
                return False
            if q.qty <= 0:
                # we don't make any stock valuation for negative quants because the valuation is already made for the counterpart.
                # At that time the valuation will be made at the product cost price and afterward there will be new accounting entries
                # to make the adjustments when we know the real cost price.
                return False

        # in case of routes making the link between several warehouse of the same company, the transit location belongs to this company, so we don't need to create accounting entries
        # Create Journal Entry for products arriving in the company
        if company_to and (move.location_id.usage not in ('internal', 'transit') and move.location_dest_id.usage == 'internal' or company_from != company_to):
            ctx = context.copy()
            ctx['force_company'] = company_to.id
            journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, context=ctx)
            if location_from and location_from.usage == 'customer':
                # goods returned from customer
                self._create_account_move_line(cr, uid, quants, move, acc_dest, acc_valuation, journal_id, context=ctx)
            else:
                self._create_account_move_line(cr, uid, quants, move, acc_src, acc_valuation, journal_id, context=ctx)

        # Create Journal Entry for products leaving the company
        if company_from and (move.location_id.usage == 'internal' and move.location_dest_id.usage not in ('internal', 'transit') or company_from != company_to):
            ctx = context.copy()
            ctx['force_company'] = company_from.id
            journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, context=ctx)
            if location_to and location_to.usage == 'supplier':
                # goods returned to supplier
                self._create_account_move_line(cr, uid, quants, move, acc_valuation, acc_src, journal_id, context=ctx)
            else:
                self._create_account_move_line(cr, uid, quants, move, acc_valuation, acc_dest, journal_id, context=ctx)

    def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, force_location_from=False, force_location_to=False, context=None):
        # Create the quant as usual, then post the valuation entry when the
        # product is valued in real time.
        quant = super(stock_quant, self)._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id,
                                                       src_package_id=src_package_id, dest_package_id=dest_package_id,
                                                       force_location_from=force_location_from, force_location_to=force_location_to,
                                                       context=context)
        if move.product_id.valuation == 'real_time':
            self._account_entry_move(cr, uid, [quant], move, context)
        return quant

    def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, context=None):
        res = super(stock_quant, self).move_quants_write(cr, uid, quants, move, location_dest_id, dest_package_id, context=context)
        if move.product_id.valuation == 'real_time':
            self._account_entry_move(cr, uid, quants, move, context=context)
        return res

    def _get_accounting_data_for_valuation(self, cr, uid, move, context=None):
        """
        Return the accounts and journal to use to post Journal Entries for the real-time
        valuation of the quant.

        :param context: context dictionary that can explicitly mention the company to consider via the 'force_company' key
        :returns: journal_id, source account, destination account, valuation account
        :raise: osv.except_osv() is any mandatory account or journal is not defined.
        """
        product_obj = self.pool.get('product.template')
        accounts = product_obj.get_product_accounts(cr, uid, move.product_id.product_tmpl_id.id, context)
        # A location-level valuation account overrides the product's generic one.
        if move.location_id.valuation_out_account_id:
            acc_src = move.location_id.valuation_out_account_id.id
        else:
            acc_src = accounts['stock_account_input']

        if move.location_dest_id.valuation_in_account_id:
            acc_dest = move.location_dest_id.valuation_in_account_id.id
        else:
            acc_dest = accounts['stock_account_output']

        acc_valuation = accounts.get('property_stock_valuation_account_id', False)
        journal_id = accounts['stock_journal']
        return journal_id, acc_src, acc_dest, acc_valuation

    def _prepare_account_move_line(self, cr, uid, move, qty, cost, credit_account_id, debit_account_id, context=None):
        """
        Generate the account.move.line values to post to track the stock valuation difference due to the
        processing of the given quant.
        """
        if context is None:
            context = {}
        currency_obj = self.pool.get('res.currency')
        if context.get('force_valuation_amount'):
            valuation_amount = context.get('force_valuation_amount')
        else:
            if move.product_id.cost_method == 'average':
                valuation_amount = cost if move.location_id.usage != 'internal' and move.location_dest_id.usage == 'internal' else move.product_id.standard_price
            else:
                valuation_amount = cost if move.product_id.cost_method == 'real' else move.product_id.standard_price
        # the standard_price of the product may be in another decimal precision, or not compatible with the coinage of
        # the company currency... so we need to use round() before creating the accounting entries.
        valuation_amount = currency_obj.round(cr, uid, move.company_id.currency_id, valuation_amount * qty)
        partner_id = (move.picking_id.partner_id and self.pool.get('res.partner')._find_accounting_partner(move.picking_id.partner_id).id) or False
        debit_line_vals = {
            'name': move.name,
            'product_id': move.product_id.id,
            'quantity': qty,
            'product_uom_id': move.product_id.uom_id.id,
            'ref': move.picking_id and move.picking_id.name or False,
            'date': move.date,
            'partner_id': partner_id,
            'debit': valuation_amount > 0 and valuation_amount or 0,
            'credit': valuation_amount < 0 and -valuation_amount or 0,
            'account_id': debit_account_id,
        }
        credit_line_vals = {
            'name': move.name,
            'product_id': move.product_id.id,
            'quantity': qty,
            'product_uom_id': move.product_id.uom_id.id,
            'ref': move.picking_id and move.picking_id.name or False,
            'date': move.date,
            'partner_id': partner_id,
            'credit': valuation_amount > 0 and valuation_amount or 0,
            'debit': valuation_amount < 0 and -valuation_amount or 0,
            'account_id': credit_account_id,
        }
        return [(0, 0, debit_line_vals), (0, 0, credit_line_vals)]

    def _create_account_move_line(self, cr, uid, quants, move, credit_account_id, debit_account_id, journal_id, context=None):
        # group quants by cost so one journal entry is created per distinct cost
        quant_cost_qty = {}
        for quant in quants:
            if quant_cost_qty.get(quant.cost):
                quant_cost_qty[quant.cost] += quant.qty
            else:
                quant_cost_qty[quant.cost] = quant.qty
        move_obj = self.pool.get('account.move')
        for cost, qty in quant_cost_qty.items():
            move_lines = self._prepare_account_move_line(cr, uid, move, qty, cost, credit_account_id, debit_account_id, context=context)
            period_id = context.get('force_period', self.pool.get('account.period').find(cr, uid, context=context)[0])
            move_obj.create(cr, uid, {'journal_id': journal_id,
                                      'line_id': move_lines,
                                      'period_id': period_id,
                                      'date': fields.date.context_today(self, cr, uid, context=context),
                                      'ref': move.picking_id.name}, context=context)


class stock_move(osv.osv):
    _inherit = "stock.move"

    def action_done(self, cr, uid, ids, context=None):
        # Average-cost products must be repriced *before* the move is done
        # (incoming), real-cost products *after* (outgoing).
        self.product_price_update_before_done(cr, uid, ids, context=context)
        res = super(stock_move, self).action_done(cr, uid, ids, context=context)
        self.product_price_update_after_done(cr, uid, ids, context=context)
        return res

    def _store_average_cost_price(self, cr, uid, move, context=None):
        ''' move is a browe record '''
        product_obj = self.pool.get('product.product')
        if any(q.qty <= 0 for q in move.quant_ids):
            # if there is a negative quant, the standard price shouldn't be updated
            return
        # Note: here we can't store a quant.cost directly as we may have moved out 2 units (1 unit to 5€ and 1 unit to 7€)
        # and in case of a product return of 1 unit, we can't know which of the 2 costs has to be used (5€ or 7€?).
        # So at that time, thanks to the average valuation price we are storing we will valuate it at 6€
        average_valuation_price = 0.0
        for q in move.quant_ids:
            average_valuation_price += q.qty * q.cost
        average_valuation_price = average_valuation_price / move.product_qty
        # Write the standard price, as SUPERUSER_ID because a warehouse manager may not have the right to write on products
        ctx = dict(context or {}, force_company=move.company_id.id)
        product_obj.write(cr, SUPERUSER_ID, [move.product_id.id], {'standard_price': average_valuation_price}, context=ctx)
        self.write(cr, uid, [move.id], {'price_unit': average_valuation_price}, context=context)

    def product_price_update_before_done(self, cr, uid, ids, context=None):
        product_obj = self.pool.get('product.product')
        # tmpl_dict accumulates quantities received in this batch per template
        # so several incoming moves of the same product average correctly.
        tmpl_dict = {}
        for move in self.browse(cr, uid, ids, context=context):
            # adapt standard price on incomming moves if the product cost_method is 'average'
            if (move.location_id.usage == 'supplier') and (move.product_id.cost_method == 'average'):
                product = move.product_id
                prod_tmpl_id = move.product_id.product_tmpl_id.id
                qty_available = move.product_id.product_tmpl_id.qty_available
                if tmpl_dict.get(prod_tmpl_id):
                    product_avail = qty_available + tmpl_dict[prod_tmpl_id]
                else:
                    tmpl_dict[prod_tmpl_id] = 0
                    product_avail = qty_available
                if product_avail <= 0:
                    new_std_price = move.price_unit
                else:
                    # Get the standard price
                    amount_unit = product.standard_price
                    new_std_price = ((amount_unit * product_avail) + (move.price_unit * move.product_qty)) / (product_avail + move.product_qty)
                tmpl_dict[prod_tmpl_id] += move.product_qty
                # Write the standard price, as SUPERUSER_ID because a warehouse manager may not have the right to write on products
                ctx = dict(context or {}, force_company=move.company_id.id)
                product_obj.write(cr, SUPERUSER_ID, [product.id], {'standard_price': new_std_price}, context=ctx)

    def product_price_update_after_done(self, cr, uid, ids, context=None):
        ''' This method adapts the price on the product when necessary '''
        for move in self.browse(cr, uid, ids, context=context):
            # adapt standard price on outgoing moves if the product cost_method is 'real', so that a return
            # or an inventory loss is made using the last value used for an outgoing valuation.
            if move.product_id.cost_method == 'real' and move.location_dest_id.usage != 'internal':
                # store the average price of the move on the move and product form
                self._store_average_cost_price(cr, uid, move, context=context)
agpl-3.0
karllessard/tensorflow
tensorflow/python/data/experimental/kernel_tests/optimization/latency_all_edges_test.py
6
4086
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `LatencyAllEdges` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized

from tensorflow.python.data.experimental.kernel_tests import stats_dataset_test_base
from tensorflow.python.data.experimental.ops import stats_aggregator
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.platform import test


class LatencyAllEdgesTest(stats_dataset_test_base.StatsDatasetTestBase,
                          parameterized.TestCase):
  # TODO(jsimsa): Investigate why are graph-mode tests failing.

  def _run_pipeline_and_check_latency(self, autotune):
    """Builds the pipeline, runs it once, and checks the latency counters.

    The only differences between the autotune-on and autotune-off cases are
    the presence of the "Model" op in the expected graph and whether the
    autotune option is explicitly disabled, so both tests share this helper.
    """
    aggregator = stats_aggregator.StatsAggregator()
    expected_ops = [
        "LatencyStats", "Map", "LatencyStats", "Prefetch", "LatencyStats",
        "MaxIntraOpParallelism", "LatencyStats"
    ]
    if autotune:
      expected_ops.append("Model")
    expected_ops.append("SetStatsAggregator")
    dataset = dataset_ops.Dataset.from_tensors(1).apply(
        testing.assert_next(expected_ops)).map(lambda x: x * x).prefetch(1)
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    if not autotune:
      options.experimental_optimization.autotune = False
    options.experimental_stats.latency_all_edges = True
    options.experimental_stats.aggregator = aggregator
    dataset = dataset.with_options(options)
    self.assertDatasetProduces(
        dataset,
        expected_output=[1],
        requires_initialization=True,
        num_test_iterations=1)
    handle = self.getHandle(aggregator)
    for dataset_name in ("TensorDataset", "MapDataset", "PrefetchDataset"):
      self.assertStatisticsHasCount(
          handle, self.regexForNodeName("record_latency::" + dataset_name), 1)

  @combinations.generate(test_base.eager_only_combinations())
  def testLatencyStatsOptimizationAutotuneOff(self):
    self._run_pipeline_and_check_latency(autotune=False)

  @combinations.generate(test_base.eager_only_combinations())
  def testLatencyStatsOptimizationAutotuneOn(self):
    self._run_pipeline_and_check_latency(autotune=True)


if __name__ == "__main__":
  test.main()
apache-2.0
hectorip/gae-boilerplate
bp_includes/external/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py
2360
3778
"""The match_hostname() function from Python 3.3.3, essential when using SSL.""" # Note: This file is under the PSF license as the code comes from the python # stdlib. http://docs.python.org/3/license.html import re __version__ = '3.4.0.2' class CertificateError(ValueError): pass def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False # Ported from python3-syntax: # leftmost, *remainder = dn.split(r'.') parts = dn.split(r'.') leftmost = parts[0] remainder = parts[1:] wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. 
www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname) def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. """ if not cert: raise ValueError("empty or no certificate") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_match(value, hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " "doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " "subjectAltName fields were found")
lgpl-3.0
kingosticks/mopidy
mopidy/models/immutable.py
4
7076
import copy
import itertools
import weakref

from mopidy.models.fields import Field

# Registered models for automatic deserialization
_models = {}


class ImmutableObject:

    """
    Superclass for immutable objects whose fields can only be modified via the
    constructor.

    This version of this class has been retained to avoid breaking any clients
    relying on it's behavior. Internally in Mopidy we now use
    :class:`ValidatedImmutableObject` for type safety and it's much smaller
    memory footprint.

    :param kwargs: kwargs to set as fields on the object
    :type kwargs: any
    """

    # Any sub-classes that don't set slots won't be effected by the base using
    # slots as they will still get an instance dict.
    __slots__ = ["__weakref__"]

    def __init__(self, *args, **kwargs):
        for key, value in kwargs.items():
            if not self._is_valid_field(key):
                raise TypeError(
                    f"__init__() got an unexpected keyword argument {key!r}"
                )
            self._set_field(key, value)

    def __setattr__(self, name, value):
        # Only "private" attributes may be mutated; everything else is frozen.
        if name.startswith("_"):
            object.__setattr__(self, name, value)
        else:
            raise AttributeError("Object is immutable.")

    def __delattr__(self, name):
        if name.startswith("_"):
            object.__delattr__(self, name)
        else:
            raise AttributeError("Object is immutable.")

    def _is_valid_field(self, name):
        # A field is any non-callable attribute already present on the class.
        return hasattr(self, name) and not callable(getattr(self, name))

    def _set_field(self, name, value):
        # Values equal to the class-level default are not stored at all,
        # which keeps instance dicts small.
        if value == getattr(self.__class__, name):
            self.__dict__.pop(name, None)
        else:
            self.__dict__[name] = value

    def _items(self):
        return self.__dict__.items()

    def __repr__(self):
        parts = []
        for name, value in sorted(self._items()):
            if isinstance(value, (frozenset, tuple)):
                if not value:
                    # Empty immutable collections are omitted from the repr.
                    continue
                value = list(value)
            parts.append(f"{name}={value!r}")
        return f"{self.__class__.__name__}({', '.join(parts)})"

    def __hash__(self):
        return sum(hash(name) + hash(value) for name, value in self._items())

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        # zip_longest with a unique fillvalue makes differing lengths unequal.
        pairs = itertools.zip_longest(
            self._items(), other._items(), fillvalue=object()
        )
        return all(mine == theirs for mine, theirs in pairs)

    def __ne__(self, other):
        return not self.__eq__(other)

    def replace(self, **kwargs):
        """
        Replace the fields in the model and return a new instance

        Examples::

            # Returns a track with a new name
            Track(name='foo').replace(name='bar')
            # Return an album with a new number of tracks
            Album(num_tracks=2).replace(num_tracks=5)

        :param kwargs: kwargs to set as fields on the object
        :type kwargs: any
        :rtype: instance of the model with replaced fields
        """
        other = copy.copy(self)
        for key, value in kwargs.items():
            if not self._is_valid_field(key):
                raise TypeError(
                    f"replace() got an unexpected keyword argument {key!r}"
                )
            other._set_field(key, value)
        return other

    def serialize(self):
        data = {}
        data["__model__"] = self.__class__.__name__
        for name, value in self._items():
            if isinstance(value, (set, frozenset, list, tuple)):
                value = [
                    item.serialize() if isinstance(item, ImmutableObject) else item
                    for item in value
                ]
            elif isinstance(value, ImmutableObject):
                value = value.serialize()
            if isinstance(value, list) and len(value) == 0:
                # Empty lists are left out of the serialized form.
                continue
            data[name] = value
        return data


class _ValidatedImmutableObjectMeta(type):

    """Helper that initializes fields, slots and memoizes instance creation."""

    def __new__(cls, name, bases, attrs):
        # Inherit field definitions from all bases, then add our own.
        fields = {}
        for base in bases:
            fields.update(getattr(base, "_fields", {}))
        for key, value in attrs.items():
            if isinstance(value, Field):
                fields[key] = "_" + key
                value._name = key

        attrs["_fields"] = fields
        attrs["_instances"] = weakref.WeakValueDictionary()
        attrs["__slots__"] = list(attrs.get("__slots__", [])) + list(
            fields.values()
        )

        new_cls = super().__new__(cls, name, bases, attrs)
        # Register every concrete model for automatic deserialization.
        if new_cls.__name__ != "ValidatedImmutableObject":
            _models[new_cls.__name__] = new_cls
        return new_cls

    def __call__(cls, *args, **kwargs):  # noqa: N805
        # Memoize: identical instances collapse to a single shared object.
        instance = super().__call__(*args, **kwargs)
        return cls._instances.setdefault(weakref.ref(instance), instance)


class ValidatedImmutableObject(
    ImmutableObject, metaclass=_ValidatedImmutableObjectMeta
):

    """
    Superclass for immutable objects whose fields can only be modified via the
    constructor. Fields should be :class:`Field` instances to ensure type
    safety in our models.

    Note that since these models can not be changed, we heavily memoize them
    to save memory. So constructing a class with the same arguments twice will
    give you the same instance twice.
    """

    __slots__ = ["_hash"]

    def __hash__(self):
        # Hash lazily and cache, since instances are immutable.
        if not hasattr(self, "_hash"):
            hash_sum = super().__hash__()
            object.__setattr__(self, "_hash", hash_sum)
        return self._hash

    def _is_valid_field(self, name):
        return name in self._fields

    def _set_field(self, name, value):
        object.__setattr__(self, name, value)

    def _items(self):
        for field, key in self._fields.items():
            if hasattr(self, key):
                yield field, getattr(self, key)

    def replace(self, **kwargs):
        """
        Replace the fields in the model and return a new instance

        Examples::

            # Returns a track with a new name
            Track(name='foo').replace(name='bar')
            # Return an album with a new number of tracks
            Album(num_tracks=2).replace(num_tracks=5)

        Note that internally we memoize heavily to keep memory usage down
        given our overly repetitive data structures. So you might get an
        existing instance if it contains the same values.

        :param kwargs: kwargs to set as fields on the object
        :type kwargs: any
        :rtype: instance of the model with replaced fields
        """
        if not kwargs:
            return self
        other = super().replace(**kwargs)
        # The cached hash is stale on the copy; drop it so it is recomputed.
        if hasattr(self, "_hash"):
            object.__delattr__(other, "_hash")
        return self._instances.setdefault(weakref.ref(other), other)
apache-2.0
CaptainDesAstres/Simple-Blender-Render-Manager
usefullFunctions.py
1
1409
#!/usr/bin/python3.4
# -*-coding:Utf-8 -*
import time


def now(short=True):
    '''Return the current date as a string: short form HH:MM:SS,
    long form DD.MM.AAAA-HH:MM:SS.'''
    if short:
        return time.strftime('%H:%M:%S')
    return time.strftime('%d.%m.%Y-%H:%M:%S')


def columnLimit(value, limit, begin=True, sep='|'):
    '''Make a fixed-size text column of width ``limit`` followed by ``sep``.

    If ``value`` is too long it is truncated with an ellipsis: ``begin``
    is the number of leading characters to keep (``True`` keeps the head
    only); the remaining width shows the tail of the string.
    '''
    if not isinstance(value, str):
        value = str(value)

    if begin is True:
        begin = limit  # number of first characters to display

    if len(value) > limit:
        return (value[0:begin - 1] + '…'       # leading characters
                + value[len(value) - (limit - begin):]  # trailing characters
                + sep)                          # column separator
    # Pad with spaces to match the requested width.
    return value + (' ' * (limit - len(value))) + sep


def indexPrintList(l):
    '''Print each element of a list prefixed by its index.'''
    for i, v in enumerate(l):
        print(str(i) + '- ' + str(v))


class XML:
    '''Namespace gathering helpers for XML entity (de)serialization.'''

    # Mapping of special characters to their XML entity representation.
    entities = {
        '\'': '&apos;',
        '"': '&quot;',
        '<': '&lt;',
        '>': '&gt;'
    }

    @staticmethod
    def encode(txt):
        '''Return ``txt`` with XML special characters replaced by entities.

        BUGFIX: the original called ``txt.replace(...)`` without using the
        return value; since strings are immutable it returned ``txt``
        unchanged.  The results are now assigned back.
        '''
        # '&' must be escaped first so freshly produced entities
        # are not escaped a second time.
        txt = txt.replace('&', '&amp;')
        for entity, code in XML.entities.items():
            txt = txt.replace(entity, code)
        return txt

    @staticmethod
    def decode(txt):
        '''Inverse of :meth:`encode`: replace XML entities by the original
        characters.

        BUGFIX: same as :meth:`encode` — the replace results were discarded
        in the original, making this function a no-op.
        '''
        for entity, code in XML.entities.items():
            txt = txt.replace(code, entity)
        # '&amp;' is decoded last, mirroring the encode order.
        return txt.replace('&amp;', '&')
mit
MathewWi/fbzx-wii
src/z80free/z80free_gencode.py
8
50964
#!/usr/bin/env python # -*- coding: UTF-8 -*- # Copyright 2008-2009 (C) Raster Software Vigo (Sergio Costas) # This file is part of Z80Free # # Z80Free is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # Z80Free is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import sys class generic_parser: def get_code_from_one_param(self,element,bits): """ Returns nbits,read,write,condition Parses the ELEMENT string and, if its a register, a number or a memory access, returns in READ a piece of C code that allows to get that value; in WRITE a piece of code that allows to store a value there (if applicable, None if not valid) and NONE in CONDITION. if ELEMENT is a condition (like check ZERO, CARRY...), it returns None in READ and WRITE, and in CONDITION a piece of C code which returns TRUE (1) if that condition is true, or FALSE (0) if it is false. When the string has an ambiguous size (like (nn) in LD A,(nn) ), this function will use BITS to know how many bits has to use. But if BITS is 0 it means that the main class still doesn't know how many bits the current instruction is using, so this function must return 0 in NBITS. But if it can determine the true bit size (both because it's a fixed size element, like a register, or because BITS contains a value different of 0) then it must returns that size. 
If the combination of ELEMENT and BITS is unknown, it must call self.unknown_param() to show an error and stop the parsing process """ raise TypeError('Abstract method z80parser.get_code_from_one_param called') def create_code(self,ncode,tst1,tst2,opcode,code_read,code_write,condition,bits): """ Writes C code for the opcode OPCODE, with the list CODE_READ containing C code for read each one of the parameters, the list CODE_WRITE containing C code for write each one of the parameters, CONDITION containing a C code string to check if the condition is TRUE or FALSE (or containing NONE if the opcode is unconditional), and all parameters having a wide of BITS bits. NCODE is the decimal code asigned to this opcode, and TST1 and TST2 are the TStates used by this opcode to run (the first when the condition is True, and the second when the condition is False). If this method returns False the main class will assume that it hasn't added the C code to return the number of TStates, and will add a line with the code RETURN (TST1); after it; but if it returns True, the class will assume that the C code inserted already returns the number of TStates, and won't add the RETURN line. This is an abstract method, and must be implemented in the class derived from this one. If the combination of OPCODE and BITS is unknown, it must call self.unknown_opcode() to show an error and stop the parsing process """ raise TypeError('Abstract method z80parser.create_code called') def write_start_code(self,filename,file_out): """ Writes the first part of the C code for the file """ raise TypeError('Abstract method z80parser.write_start_code called') def write_end_code(self,filename,file_out): """ Writes the first part of the C code for the file """ raise TypeError('Abstract method z80parser.write_end_code called') def license(self): """ writes the license terms. 
It must be always the GPL """ if self.copyright_year==None: raise TypeError('Copyright year not defined') if self.copyright_author==None: raise TypeError('Copyright author not defined') if self.copyright_program_name==None: raise TypeError('Copyright program name not defined') self.file_out.write('/*\n') self.file_out.write(' * Copyright (C) '+str(self.copyright_year)+' '+str(self.copyright_author)+'\n') self.file_out.write(' * This file is part of '+str(self.copyright_program_name)+'\n') self.file_out.write(' *\n') self.file_out.write(' * '+str(self.copyright_program_name)+' is free software; you can redistribute it and/or modify\n') self.file_out.write(' * it under the terms of the GNU General Public License as published by\n') self.file_out.write(' * the Free Software Foundation; either version 3 of the License, or\n') self.file_out.write(' * (at your option) any later version.\n') self.file_out.write(' *\n') self.file_out.write(' * '+str(self.copyright_program_name)+' is distributed in the hope that it will be useful,\n') self.file_out.write(' * but WITHOUT ANY WARRANTY; without even the implied warranty of\n') self.file_out.write(' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n') self.file_out.write(' * GNU General Public License for more details.\n') self.file_out.write(' *\n') self.file_out.write(' * You should have received a copy of the GNU General Public License\n') self.file_out.write(' * along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n') self.file_out.write(' * \n') self.file_out.write(' */') def __init__(self,filename): self.line_number=0 self.curr_element="" self.curr_bits=0 self.curr_opcode="" self.curr_instruction="" self.filename=filename self.file_in=open(filename+".txt","r") self.file_out=open(filename+".c","w") self.license() # the code generated by z80free_gencode.py is ALWAYS GPL self.file_out.write('\n\n') self.write_start_code() self.file_out.write('\tswitch(opcode) {\n') while True: if self.proccess_line()==False: break self.file_out.write('\t}\n\n') self.write_end_code() self.file_out.close() self.file_in.close() def proccess_line(self): while True: ncode,tst1,tst2,opcode,params=self.get_parts() if ncode==None: return False if tst1!=0: break code_read,code_write,condition,bits=self.get_code_from_params(params) self.file_out.write("\tcase "+str(ncode)+": // "+self.curr_instruction+"\n") self.curr_opcode=opcode self.curr_bits=bits tst_done=self.create_code(ncode,tst1,tst2,opcode,code_read,code_write,condition,bits) if tst_done==False: self.file_out.write("\t\treturn ("+str(tst1)+");\n") self.file_out.write("\tbreak;\n") def unknown_opcode(self): self.show_error("Unknown opcode "+str(self.curr_opcode)+" for "+str(self.curr_bits)+" bits wide while processing "+str(self.curr_instruction)) def unknown_param(self): self.show_error("Unknown parameter "+str(self.curr_element)+" for "+str(self.curr_bits)+" bits wide while processing "+str(self.curr_instruction)) def show_error(self,msg_error): print print "Syntax error while processing file "+self.filename+".txt" print "at line "+str(self.line_number)+" when processing instruction "+self.curr_instruction print msg_error print "Check the file and run again" print self.file_out.close() self.file_in.close() sys.exit(1) def get_parts(self): """ Gets the parts from a file line: number code, TStates OPCODE and parameters """ while True: line=self.file_in.readline() self.line_number+=1 if line=="": 
return None,0,0,"","" if (line[0]!="#") and (line!="\n"): break if line[-1]=="\n": line=line[:-1] l_ncode=int(line[0:2],16) pos=line[3:].find("\t") # find the separator between the TStates and the OPCODE if (line[3]=="\t") or (line[3]=="*") or (pos==-1): # if there are no TStates nor OPCODE, its an empty entry return l_ncode,0,0,"","" tstates=line[3:pos+3] pos2=tstates.find("/") # check if there are two TState values if pos2==-1: l_tst1=int(tstates) l_tst2=int(tstates) else: l_tst1=int(tstates[:pos2]) # tstates if the condition is True l_tst2=int(tstates[pos2+1:]) # tstates if the condition is False l_opcode=line[pos+4:] self.curr_instruction=l_opcode pos=l_opcode.find(" ") if pos!=-1: # there are parameters l_params=l_opcode[pos+1:] l_opcode=l_opcode[:pos] else: l_params="" return l_ncode,l_tst1,l_tst2,l_opcode,l_params def get_code_from_params(self,params): """ Returns the C code to read or write the params, and the code for the condition Return two lists, an string and an integer. The first list contains the C code to read each parameter; the second list the C code to write each parameter; the string returns the string which checks the condition (None if there's no condition) and the integer indicates when the operators are 8 or 16 bit wide. If it finds a set of parameters that can't parse, it prints it and exits It uses the virtual method get_code_from_one_param(element,bits) to parse each parameter """ bits=0 if params=="": return [],[],None,0 bits_found=False list_params=params.split(",") while bits_found==False: bits_found=True read_code=[] write_code=[] condition=None for element in list_params: # a parameter's bit wide can be ambiguous (like (nn) in LD A,(nn) or # LD HL,(nn) ) so we try to read it passing the currently known bit wide, # and the function must returns the true wide if it's unambiguous (like A, HL...) # the true wide if passed a correct wide, or 0 if the current bit wide is # unknown and the parameter is ambiguous. 
So in the case of # LD A,(nn) we do only one pass to get the wide, but in the case of # LD (nn),A we do two passes, because the first parameter is ambiguous # in size self.curr_bits=bits self.curr_element=element nbits,read,write,p_condition=self.get_code_from_one_param(element,bits) if p_condition!=None: # this parameter is a condition condition=p_condition continue if nbits==0: bits_found=False else: if bits==0: bits=nbits if bits!=nbits: self.show_error("Mixed bit wide parameters (8 and 16 bit parameters in the same line)") read_code.append(read) write_code.append(write) if (bits==0) and (bits_found==False): self.show_error("All parameters are ambiguous: "+str(params)) return read_code,write_code,condition,bits class z80_parser(generic_parser): def __init__(self,filename): self.copyright_year="2008-2009" self.copyright_author="Sergio Costas (Raster Software Vigo)" self.copyright_program_name="Z80Free" generic_parser.__init__(self, filename) def get_code_from_one_param(self,element,bits): """ Returns nbits,read,write,condition """ if element=="AF": return 16,"processor->Rm.wr.AF","processor->Rm.wr.AF=@DATA@;",None if element=="AF'": return 16,"processor->Ra.wr.AF","processor->Ra.wr.AF=@DATA@;",None if element=="BC": return 16,"processor->Rm.wr.BC","processor->Rm.wr.BC=@DATA@;",None if element=="DE": return 16,"processor->Rm.wr.DE","processor->Rm.wr.DE=@DATA@;",None if element=="HL": return 16,"processor->Rm.wr.HL","processor->Rm.wr.HL=@DATA@;",None if element=="SP": return 16,"processor->Rm.wr.SP","processor->Rm.wr.SP=@DATA@;",None if element=="IX": return 16,"processor->Rm.wr.IX","processor->Rm.wr.IX=@DATA@;",None if element=="IY": return 16,"processor->Rm.wr.IY","processor->Rm.wr.IY=@DATA@;",None if element=="(IX+d)": if bits==0: return 0,None,None,None elif bits==8: if (self.codes=="DDCB") or (self.codes=="FDCB"): return 
8,"Z80free_Rd(Z80free_addr_relativeXDCB(processor,processor->Rm.wr.IX,d1))","Z80free_Wr(Z80free_addr_relativeXDCB(processor,processor->Rm.wr.IX,d1),@DATA@);",None else: return 8,"Z80free_Rd(Z80free_addr_relative(processor,processor->Rm.wr.IX))","Z80free_Wr(Z80free_addr_relative(processor,processor->Rm.wr.IX),@DATA@);",None if element=="(IY+d)": if bits==0: return 0,None,None,None elif bits==8: if (self.codes=="DDCB") or (self.codes=="FDCB"): return 8,"Z80free_Rd(Z80free_addr_relativeXDCB(processor,processor->Rm.wr.IY,d1))","Z80free_Wr(Z80free_addr_relativeXDCB(processor,processor->Rm.wr.IY,d1),@DATA@);",None else: return 8,"Z80free_Rd(Z80free_addr_relative(processor,processor->Rm.wr.IY))","Z80free_Wr(Z80free_addr_relative(processor,processor->Rm.wr.IY),@DATA@);",None if element=="IX+d": if bits==0: return 0,None,None,None elif bits==8: return 8,"Z80free_addr_relative(processor,processor->Rm.wr.IX)",None,None if element=="IY+d": if bits==0: return 0,None,None,None elif bits==8: return 8,"Z80free_addr_relative(processor,processor->Rm.wr.IY)",None,None if element=="b(IX+d)": if (self.codes=="DDCB") or (self.codes=="FDCB"): return 8,"Z80free_Rd(Z80free_addr_relativeXDCB(processor,processor->Rm.wr.IX,d1))","Z80free_Wr(Z80free_addr_relativeXDCB(processor,processor->Rm.wr.IX,d1),@DATA@);",None else: return 8,"Z80free_Rd(Z80free_addr_relative(processor,processor->Rm.wr.IX))","Z80free_Wr(Z80free_addr_relative(processor,processor->Rm.wr.IX),@DATA@);",None if element=="b(IY+d)": if (self.codes=="DDCB") or (self.codes=="FDCB"): return 8,"Z80free_Rd(Z80free_addr_relativeXDCB(processor,processor->Rm.wr.IY,d1))","Z80free_Wr(Z80free_addr_relativeXDCB(processor,processor->Rm.wr.IY,d1),@DATA@);",None else: return 8,"Z80free_Rd(Z80free_addr_relative(processor,processor->Rm.wr.IY))","Z80free_Wr(Z80free_addr_relative(processor,processor->Rm.wr.IY),@DATA@);",None if element=="nn": return 16,"Z80free_read_param_16(processor)",None,None if element=="n": return 
8,"Z80free_read_param_8(processor)",None,None if element=="(nn)": if bits==0: return 0,None,None,None elif bits==8: return 8,"Z80free_Rd(Z80free_read_param_16(processor))","Z80free_Wr(Z80free_read_param_16(processor),@DATA@);",None elif bits==16: return 16,"Z80free_read16(Z80free_read_param_16(processor))","Z80free_write16(Z80free_read_param_16(processor),@DATA@);",None if element=="(BC)": if bits==0: return 0,None,None,None elif bits==8: return 8,"Z80free_Rd(processor->Rm.wr.BC)","Z80free_Wr(processor->Rm.wr.BC,@DATA@);",None elif bits==16: return 16,"Z80free_read16(processor->Rm.wr.BC)","Z80free_write16(processor->Rm.wr.BC,@DATA@);",None if element=="(DE)": if bits==0: return 0,None,None,None elif bits==8: return 8,"Z80free_Rd(processor->Rm.wr.DE)","Z80free_Wr(processor->Rm.wr.DE,@DATA@);",None elif bits==16: return 16,"Z80free_read16(processor->Rm.wr.DE)","Z80free_write16(processor->Rm.wr.DE,@DATA@);",None if element=="(HL)": if bits==0: return 0,None,None,None elif bits==8: return 8,"Z80free_Rd(processor->Rm.wr.HL)","Z80free_Wr(processor->Rm.wr.HL,@DATA@);",None elif bits==16: return 16,"Z80free_read16(processor->Rm.wr.HL)","Z80free_write16(processor->Rm.wr.HL,@DATA@);",None if element=="(SP)": if bits==0: return 0,None,None,None elif bits==16: return 16,"Z80free_read16(processor->Rm.wr.SP)","Z80free_write16(processor->Rm.wr.SP,@DATA@);",None if element=="b(HL)": return 8,"Z80free_Rd(processor->Rm.wr.HL)","Z80free_Wr(processor->Rm.wr.HL,@DATA@);",None if element=="I": return 8,"Z80free_readI(processor)","processor->I=@DATA@;",None if element=="R": return 8,"Z80free_readR(processor)","Z80free_setR(processor,@DATA@);",None if element=="A": return 8,"processor->Rm.br.A","processor->Rm.br.A=@DATA@;",None if element=="B": return 8,"processor->Rm.br.B","processor->Rm.br.B=@DATA@;",None if element=="C": return 8,"processor->Rm.br.C","processor->Rm.br.C=@DATA@;",None if element=="D": return 8,"processor->Rm.br.D","processor->Rm.br.D=@DATA@;",None if element=="E": 
return 8,"processor->Rm.br.E","processor->Rm.br.E=@DATA@;",None if element=="H": return 8,"processor->Rm.br.H","processor->Rm.br.H=@DATA@;",None if element=="L": return 8,"processor->Rm.br.L","processor->Rm.br.L=@DATA@;",None if element=="IXH": return 8,"processor->Rm.br.IXh","processor->Rm.br.IXh=@DATA@;",None if element=="IXL": return 8,"processor->Rm.br.IXl","processor->Rm.br.IXl=@DATA@;",None if element=="IYH": return 8,"processor->Rm.br.IYh","processor->Rm.br.IYh=@DATA@;",None if element=="IYL": return 8,"processor->Rm.br.IYl","processor->Rm.br.IYl=@DATA@;",None if element=="0H": return 16,"0",None,None if element=="0": return 8,"0",None,None if element=="1": return 8,"1",None,None if element=="2": return 8,"2",None,None if element=="3": return 8,"3",None,None if element=="4": return 8,"4",None,None if element=="5": return 8,"5",None,None if element=="6": return 8,"6",None,None if element=="7": return 8,"7",None,None if element=="8H": return 16,"8",None,None if element=="10H": return 16,"16",None,None if element=="18H": return 16,"24",None,None if element=="20H": return 16,"32",None,None if element=="28H": return 16,"40",None,None if element=="30H": return 16,"48",None,None if element=="38H": return 16,"56",None,None if element=="NZ": return 0,None,None,"(0==(F_Z&processor->Rm.br.F))" if element=="Z": return 0,None,None,"(F_Z&processor->Rm.br.F)" if element=="NC": return 0,None,None,"(0==(F_C&processor->Rm.br.F))" if element=="CF": return 0,None,None,"(F_C&processor->Rm.br.F)" if element=="PO": return 0,None,None,"(0==(F_PV&processor->Rm.br.F))" if element=="PE": return 0,None,None,"(F_PV&processor->Rm.br.F)" if element=="P": return 0,None,None,"(0==(F_S&processor->Rm.br.F))" if element=="M": return 0,None,None,"(F_S&processor->Rm.br.F)" self.unknown_param() def create_code(self,ncode,tst1,tst2,opcode,code_read,code_write,condition,bits): """ Returns True if it has written the RETURN for returning the TStates """ if opcode=="NOP": return False if 
opcode=="LD2": self.file_out.write("\t\ttmp2="+code_read[0]+";\n") self.file_out.write("\t\tZ80free_Wr(tmp2,"+code_read[1]+");\n") return False if opcode=="LD": self.file_out.write("\t\t"+code_write[0].replace("@DATA@",code_read[1])+"\n") return False if opcode=="INC": if bits==16: self.file_out.write("\t\t"+code_write[0].replace("@DATA@","Z80free_doIncDec16(processor,"+code_read[0]+",0)")+"\n") return False elif bits==8: self.file_out.write("\t\t"+code_write[0].replace("@DATA@","Z80free_doIncDec(processor,"+code_read[0]+",0)")+"\n") return False if opcode=="DEC": if bits==16: self.file_out.write("\t\t"+code_write[0].replace("@DATA@","Z80free_doIncDec16(processor,"+code_read[0]+",1)")+"\n") return False elif bits==8: self.file_out.write("\t\t"+code_write[0].replace("@DATA@","Z80free_doIncDec(processor,"+code_read[0]+",1)")+"\n") return False if opcode=="RLCA": self.file_out.write("\t\tprocessor->Rm.br.A=Z80free_doRLC(processor,0,processor->Rm.br.A);\n") return False if opcode=="RRCA": self.file_out.write("\t\tprocessor->Rm.br.A=Z80free_doRRC(processor,0,processor->Rm.br.A);\n") return False if opcode=="EX": self.file_out.write("\t\ttmp2="+code_read[0]+";\n") self.file_out.write("\t\t"+code_write[0].replace("@DATA@",code_read[1])+"\n") self.file_out.write("\t\t"+code_write[1].replace("@DATA@","tmp2")+"\n") return False if opcode=="ADD": if bits==16: data="Z80free_doArithmetic16(processor,"+code_read[0]+","+code_read[1]+",0,0)" self.file_out.write("\t\t"+code_write[0].replace("@DATA@",data)+"\n") return False elif bits==8: data="Z80free_doArithmetic(processor,"+code_read[0]+","+code_read[1]+",0,0)" self.file_out.write("\t\t"+code_write[0].replace("@DATA@",data)+"\n") return False if opcode=="ADC": if bits==16: data="Z80free_doArithmetic16(processor,"+code_read[0]+","+code_read[1]+",1,0)" self.file_out.write("\t\t"+code_write[0].replace("@DATA@",data)+"\n") return False elif bits==8: data="Z80free_doArithmetic(processor,"+code_read[0]+","+code_read[1]+",1,0)" 
self.file_out.write("\t\t"+code_write[0].replace("@DATA@",data)+"\n") return False if opcode=="SUB": if bits==16: data="Z80free_doArithmetic16(processor,"+code_read[0]+","+code_read[1]+",0,1)" self.file_out.write("\t\t"+code_write[0].replace("@DATA@",data)+"\n") return False elif bits==8: data="Z80free_doArithmetic(processor,processor->Rm.br.A,"+code_read[0]+",0,1)" self.file_out.write("\t\tprocessor->Rm.br.A="+data+";\n") return False if opcode=="SBC": if bits==16: data="Z80free_doArithmetic16(processor,"+code_read[0]+","+code_read[1]+",1,1)" self.file_out.write("\t\t"+code_write[0].replace("@DATA@",data)+"\n") return False elif bits==8: data="Z80free_doArithmetic(processor,"+code_read[0]+","+code_read[1]+",1,1)" self.file_out.write("\t\t"+code_write[0].replace("@DATA@",data)+"\n") return False if opcode=="DJNZ": self.file_out.write("\t\ttmp1="+code_read[0]+";\n") self.file_out.write("\t\tprocessor->Rm.br.B--;\n") self.file_out.write("\t\tif (processor->Rm.br.B) {\n") self.file_out.write("\t\t\tZ80free_jump_relative(processor,tmp1);\n") self.file_out.write("\t\t\treturn ("+str(tst1)+");\n") self.file_out.write("\t\t} else {\n") self.file_out.write("\t\t\treturn ("+str(tst2)+");\n") self.file_out.write("\t\t}\n") return True if opcode=="RLA": self.file_out.write("\t\tprocessor->Rm.br.A=Z80free_doRL(processor,0,processor->Rm.br.A);\n") return False if opcode=="RRA": self.file_out.write("\t\tprocessor->Rm.br.A=Z80free_doRR(processor,0,processor->Rm.br.A);\n") return False if opcode=="DAA": self.file_out.write("\t\tZ80free_doDAA(processor);\n") return False if opcode=="CPL": self.file_out.write("\t\tprocessor->Rm.br.A=255-processor->Rm.br.A;\n") self.file_out.write("\t\tZ80free_adjustFlags(processor,processor->Rm.br.A);\n") self.file_out.write("\t\tZ80free_setFlag(processor,F_H);\n") self.file_out.write("\t\tZ80free_setFlag(processor,F_N);\n") return False if opcode=="SCF": self.file_out.write("\t\tZ80free_setFlag(processor,F_C);\n") 
self.file_out.write("\t\tZ80free_resFlag(processor,F_H);\n") self.file_out.write("\t\tZ80free_resFlag(processor,F_N);\n") self.file_out.write("\t\tZ80free_adjustFlags(processor,processor->Rm.br.A);\n") return False if opcode=="CCF": self.file_out.write("\t\tZ80free_valFlag(processor,F_C,(~processor->Rm.br.F)&F_C);\n") self.file_out.write("\t\tZ80free_resFlag(processor,F_N);\n") self.file_out.write("\t\tZ80free_adjustFlags(processor,processor->Rm.br.A);\n") return False if opcode=="HALT": self.file_out.write("\t\tprocessor->PC--;\n") self.file_out.write("\t\tprocessor->HALT=1;\n") return False if opcode=="AND": if bits==8: self.file_out.write("\t\tZ80free_doAND(processor,"+code_read[0]+");\n") return False if opcode=="OR": if bits==8: self.file_out.write("\t\tZ80free_doOR(processor,"+code_read[0]+");\n") return False if opcode=="XOR": if bits==8: self.file_out.write("\t\tZ80free_doXOR(processor,"+code_read[0]+");\n") return False if opcode=="CP": if bits==8: self.file_out.write("\t\ttmp1="+code_read[0]+";\n") self.file_out.write("\t\tZ80free_doArithmetic(processor,processor->Rm.br.A,tmp1,0,1);\n") self.file_out.write("\t\tZ80free_adjustFlags(processor,tmp1);\n") return False if opcode=="JR": self.file_out.write("\t\ttmp1="+code_read[0]+";\n") if condition==None: # no condition, always jump self.file_out.write("\t\tZ80free_jump_relative(processor,tmp1);\n") return False else: self.file_out.write("\t\tif "+condition+" {\n") self.file_out.write("\t\t\tZ80free_jump_relative(processor,tmp1);\n") self.file_out.write("\t\t\treturn ("+str(tst1)+");\n") self.file_out.write("\t\t} else {\n") self.file_out.write("\t\t\treturn ("+str(tst2)+");\n") self.file_out.write("\t\t}\n") return True if opcode=="RET": if condition==None: self.file_out.write("\t\tprocessor->PC=Z80free_doPop(processor);\n") return False else: self.file_out.write("\t\tif "+condition+" {\n") self.file_out.write("\t\t\tprocessor->PC=Z80free_doPop(processor);\n") self.file_out.write("\t\t\treturn 
("+str(tst1)+");\n") self.file_out.write("\t\t} else {\n") self.file_out.write("\t\t\treturn ("+str(tst2)+");\n") self.file_out.write("\t\t}\n") return True if opcode=="POP": self.file_out.write("\t\t"+code_write[0].replace("@DATA@","Z80free_doPop(processor)")+"\n") return False if opcode=="JP": self.file_out.write("\t\ttmp2="+code_read[0]+";\n") if condition==None: self.file_out.write("\t\tprocessor->PC=tmp2;\n") return False else: self.file_out.write("\t\tif "+condition+" {\n") self.file_out.write("\t\t\tprocessor->PC=tmp2;\n") self.file_out.write("\t\t\treturn ("+str(tst1)+");\n") self.file_out.write("\t\t} else {\n") self.file_out.write("\t\t\treturn ("+str(tst2)+");\n") self.file_out.write("\t\t}\n") return True if opcode=="CALL": self.file_out.write("\t\ttmp2="+code_read[0]+";\n") if condition==None: self.file_out.write("\t\tZ80free_doPush(processor,processor->PC);\n") self.file_out.write("\t\tprocessor->PC=tmp2;\n") return False else: self.file_out.write("\t\tif "+condition+" {\n") self.file_out.write("\t\t\tZ80free_doPush(processor,processor->PC);\n") self.file_out.write("\t\t\tprocessor->PC=tmp2;\n") self.file_out.write("\t\t\treturn ("+str(tst1)+");\n") self.file_out.write("\t\t} else {\n") self.file_out.write("\t\t\treturn ("+str(tst2)+");\n") self.file_out.write("\t\t}\n") return True if opcode=="PUSH": self.file_out.write("\t\tZ80free_doPush(processor,"+code_read[0]+");\n") return False if opcode=="RST": self.file_out.write("\t\tZ80free_doPush(processor,processor->PC);\n") self.file_out.write("\t\tprocessor->PC="+code_read[0]+";\n") return False if opcode=="EXX": self.file_out.write("\t\ttmp2=processor->Rm.wr.BC;\n") self.file_out.write("\t\tprocessor->Rm.wr.BC=processor->Ra.wr.BC;\n") self.file_out.write("\t\tprocessor->Ra.wr.BC=tmp2;\n") self.file_out.write("\t\ttmp2=processor->Rm.wr.DE;\n") self.file_out.write("\t\tprocessor->Rm.wr.DE=processor->Ra.wr.DE;\n") self.file_out.write("\t\tprocessor->Ra.wr.DE=tmp2;\n") 
self.file_out.write("\t\ttmp2=processor->Rm.wr.HL;\n") self.file_out.write("\t\tprocessor->Rm.wr.HL=processor->Ra.wr.HL;\n") self.file_out.write("\t\tprocessor->Ra.wr.HL=tmp2;\n") return False if opcode=="IN": if bits==8: self.file_out.write("\t\ttmp2=((word)processor->Rm.br.A)<<8;\n") self.file_out.write("\t\tprocessor->Rm.br.A=Z80free_In(tmp2+((word)Z80free_read_param_8(processor)));\n") return False if opcode=="IN_BC": if bits==8: self.file_out.write("\t\t"+code_write[0].replace("@DATA@","Z80free_In(processor->Rm.wr.BC)")+"\n") self.file_out.write("\t\tZ80free_adjustFlagSZP (processor,"+code_read[0]+");\n") self.file_out.write("\t\tZ80free_resFlag(processor,F_H|F_N);\n") return False elif bits==0: self.file_out.write("\t\ttmp1=Z80free_In(processor->Rm.wr.BC);\n") self.file_out.write("\t\tZ80free_adjustFlagSZP (processor,tmp1);\n") self.file_out.write("\t\tZ80free_resFlag(processor,F_H|F_N);\n") return False if opcode=="OUT": if bits==8: self.file_out.write("\t\ttmp1=processor->Rm.br.A;\n") self.file_out.write("\t\ttmp2=((word)tmp1)<<8;\n") self.file_out.write("\t\tZ80free_Out(tmp2+((word)Z80free_read_param_8(processor)),tmp1);\n") return False if opcode=="OUT_BC": if bits==8: self.file_out.write("\t\tZ80free_Out(processor->Rm.wr.BC,"+code_read[0]+");\n") return False if opcode=="DI": self.file_out.write("\t\tprocessor->IFF1=0;\n") self.file_out.write("\t\tprocessor->IFF2=0;\n") return False if opcode=="EI": self.file_out.write("\t\tprocessor->IFF1=2; /* this allows to delay one instruction the interrupts*/\n") self.file_out.write("\t\tprocessor->IFF2=1;\n") return False if opcode=="NEG": self.file_out.write("\t\tprocessor->Rm.br.A=Z80free_doArithmetic(processor,0,processor->Rm.br.A,0,1);\n") return False if opcode=="RETN": self.file_out.write("\t\tprocessor->IFF1=(processor->IFF2 ? 
2 : 0);\n") self.file_out.write("\t\tprocessor->PC=Z80free_doPop(processor);\n") return False if opcode=="RETI": self.file_out.write("\t\tprocessor->IFF1=2;\n") self.file_out.write("\t\tprocessor->IFF2=1;\n") self.file_out.write("\t\tprocessor->PC=Z80free_doPop(processor);\n") return False if opcode=="IM": self.file_out.write("\t\tprocessor->IM="+code_read[0]+";\n") return False if opcode=="RLD": self.file_out.write("\t\tZ80free_doRLD(processor);\n") return False if opcode=="RRD": self.file_out.write("\t\tZ80free_doRRD(processor);\n") return False if opcode=="LDI": self.file_out.write("\t\ttmp1=Z80free_Rd(processor->Rm.wr.HL++);\n") self.file_out.write("\t\tZ80free_Wr(processor->Rm.wr.DE++,tmp1);\n") self.file_out.write("\t\ttmp1+=processor->Rm.br.A;\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_3,tmp1&0x08);\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_5,tmp1&0x02);\n") self.file_out.write("\t\tprocessor->Rm.wr.BC--;\n") self.file_out.write("\t\tZ80free_resFlag(processor,F_H|F_N);\n") self.file_out.write("\t\tif (processor->Rm.wr.BC)\n") self.file_out.write("\t\t\tZ80free_setFlag(processor,F_PV);\n") self.file_out.write("\t\telse\n") self.file_out.write("\t\t\tZ80free_resFlag(processor,F_PV);\n") return False if opcode=="CPI": self.file_out.write("\t\ttmp2=F_C&processor->Rm.br.F;\n") self.file_out.write("\t\ttmp1=Z80free_doArithmetic(processor,processor->Rm.br.A,Z80free_Rd(processor->Rm.wr.HL++),0,1);\n") self.file_out.write("\t\tif (processor->Rm.br.F&F_H)\n") self.file_out.write("\t\t\ttmp1--;\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_3,tmp1&0x08);\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_5,tmp1&0x02);\n") self.file_out.write("\t\tZ80free_setFlag(processor,F_N);\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_C,tmp2);\n") self.file_out.write("\t\tprocessor->Rm.wr.BC--;\n") self.file_out.write("\t\tif (processor->Rm.wr.BC)\n") self.file_out.write("\t\t\tZ80free_setFlag(processor,F_PV);\n") 
self.file_out.write("\t\telse\n") self.file_out.write("\t\t\tZ80free_resFlag(processor,F_PV);\n") return False if opcode=="INI": self.file_out.write("\t\t/*INI, IND, INIR and INDR first decrement B and then uses it*/\n") self.file_out.write("\t\tZ80free_Wr(processor->Rm.wr.HL,Z80free_In(processor->Rm.wr.BC));\n") self.file_out.write("\t\tprocessor->Rm.wr.HL++;\n") self.file_out.write("\t\tprocessor->Rm.br.B=Z80free_doIncDec(processor,processor->Rm.br.B,1);\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_N, 1);\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_Z, (processor->Rm.br.B == 0));\n") return False if opcode=="OUTI": self.file_out.write("\t\t/*OUTI, OUTD, OTIR and OTDR first decrement B and then uses it*/\n") self.file_out.write("\t\tprocessor->Rm.br.B=Z80free_doIncDec(processor,processor->Rm.br.B,1);\n") self.file_out.write("\t\tZ80free_Out(processor->Rm.wr.BC,Z80free_Rd(processor->Rm.wr.HL));\n") self.file_out.write("\t\tprocessor->Rm.wr.HL++;\n") return False if opcode=="LDD": self.file_out.write("\t\ttmp1=Z80free_Rd(processor->Rm.wr.HL--);\n") self.file_out.write("\t\tZ80free_Wr(processor->Rm.wr.DE--,tmp1);\n") self.file_out.write("\t\ttmp1+=processor->Rm.br.A;\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_3,tmp1&0x08);\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_5,tmp1&0x02);\n") self.file_out.write("\t\tprocessor->Rm.wr.BC--;\n") self.file_out.write("\t\tZ80free_resFlag(processor,F_H|F_N);\n") self.file_out.write("\t\tif (processor->Rm.wr.BC)\n") self.file_out.write("\t\t\tZ80free_setFlag(processor,F_PV);\n") self.file_out.write("\t\telse\n") self.file_out.write("\t\t\tZ80free_resFlag(processor,F_PV);\n") return False if opcode=="CPD": self.file_out.write("\t\ttmp2=F_C&processor->Rm.br.F;\n") self.file_out.write("\t\ttmp1=Z80free_doArithmetic(processor,processor->Rm.br.A,Z80free_Rd(processor->Rm.wr.HL--),0,1);\n") self.file_out.write("\t\tif (processor->Rm.br.F&F_H)\n") self.file_out.write("\t\t\ttmp1--;\n") 
self.file_out.write("\t\tZ80free_valFlag(processor,F_3,tmp1&0x08);\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_5,tmp1&0x02);\n") self.file_out.write("\t\tZ80free_setFlag(processor,F_N);\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_C,tmp2);\n") self.file_out.write("\t\tprocessor->Rm.wr.BC--;\n") self.file_out.write("\t\tif (processor->Rm.wr.BC)\n") self.file_out.write("\t\t\tZ80free_setFlag(processor,F_PV);\n") self.file_out.write("\t\telse\n") self.file_out.write("\t\t\tZ80free_resFlag(processor,F_PV);\n") return False if opcode=="IND": self.file_out.write("\t\tZ80free_Wr(processor->Rm.wr.HL,Z80free_In(processor->Rm.wr.BC));\n") self.file_out.write("\t\tprocessor->Rm.br.B=Z80free_doIncDec(processor,processor->Rm.br.B,1);\n") self.file_out.write("\t\tprocessor->Rm.wr.HL--;\n") return False if opcode=="OUTD": self.file_out.write("\t\tprocessor->Rm.br.B=Z80free_doIncDec(processor,processor->Rm.br.B,1);\n") self.file_out.write("\t\tZ80free_Out(processor->Rm.wr.BC,Z80free_Rd(processor->Rm.wr.HL));\n") self.file_out.write("\t\tprocessor->Rm.wr.HL--;\n") return False if opcode=="LDIR": self.file_out.write("\t\ttmp1=Z80free_Rd(processor->Rm.wr.HL++);\n") self.file_out.write("\t\tZ80free_Wr(processor->Rm.wr.DE++,tmp1);\n") self.file_out.write("\t\tprocessor->Rm.wr.BC--;\n") self.file_out.write("\t\tif (processor->Rm.wr.BC) {\n") self.file_out.write("\t\t\tprocessor->PC-=2;\n") self.file_out.write("\t\t\treturn ("+str(tst1)+");\n") self.file_out.write("\t\t} else {\n") self.file_out.write("\t\t\tZ80free_resFlag(processor,F_H|F_N|F_PV);\n") self.file_out.write("\t\ttmp1+=processor->Rm.br.A;\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_3,tmp1&0x08);\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_5,tmp1&0x02);\n") self.file_out.write("\t\t\treturn ("+str(tst2)+");\n") self.file_out.write("\t\t}\n") return True if opcode=="CPIR": self.file_out.write("\t\ttmp2=F_C&processor->Rm.br.F;\n") 
self.file_out.write("\t\tZ80free_doArithmetic(processor,processor->Rm.br.A,Z80free_Rd(processor->Rm.wr.HL++),0,1);\n") self.file_out.write("\t\tprocessor->Rm.wr.BC--;\n") self.file_out.write("\t\tif ((processor->Rm.wr.BC)&&(!(processor->Rm.br.F&F_Z))) {\n") self.file_out.write("\t\t\tprocessor->PC-=2;\n") self.file_out.write("\t\t\treturn ("+str(tst1)+");\n") self.file_out.write("\t\t} else {\n") self.file_out.write("\t\t\tZ80free_resFlag(processor,F_H|F_PV|F_3|F_5);\n") self.file_out.write("\t\t\tZ80free_setFlag(processor,F_N);\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_C,tmp2);\n") self.file_out.write("\t\tif (processor->Rm.wr.BC)\n") self.file_out.write("\t\t\tZ80free_setFlag(processor,F_PV);") self.file_out.write("\t\t\treturn ("+str(tst2)+");\n") self.file_out.write("\t\t}\n") return True if opcode=="INIR": self.file_out.write("\t\tZ80free_Wr(processor->Rm.wr.HL,Z80free_In(processor->Rm.wr.BC));\n") self.file_out.write("\t\tprocessor->Rm.br.B=Z80free_doIncDec(processor,processor->Rm.br.B,1);\n") self.file_out.write("\t\tprocessor->Rm.wr.HL++;\n") self.file_out.write("\t\tif (processor->Rm.br.B) {\n") self.file_out.write("\t\t\tprocessor->PC-=2;\n") self.file_out.write("\t\t\treturn ("+str(tst1)+");\n") self.file_out.write("\t\t} else {\n") self.file_out.write("\t\t\treturn ("+str(tst2)+");\n") self.file_out.write("\t\t}\n") return True if opcode=="OTIR": self.file_out.write("\t\tprocessor->Rm.br.B=Z80free_doIncDec(processor,processor->Rm.br.B,1);\n") self.file_out.write("\t\tZ80free_Out(processor->Rm.wr.BC,Z80free_Rd(processor->Rm.wr.HL));\n") self.file_out.write("\t\tprocessor->Rm.wr.HL++;\n") self.file_out.write("\t\tif (processor->Rm.br.B) {\n") self.file_out.write("\t\t\tprocessor->PC-=2;\n") self.file_out.write("\t\t\treturn ("+str(tst1)+");\n") self.file_out.write("\t\t} else {\n") self.file_out.write("\t\t\treturn ("+str(tst2)+");\n") self.file_out.write("\t\t}\n") return True if opcode=="LDDR": 
self.file_out.write("\t\ttmp1=Z80free_Rd(processor->Rm.wr.HL--);\n") self.file_out.write("\t\tZ80free_Wr(processor->Rm.wr.DE--,tmp1);\n") self.file_out.write("\t\tprocessor->Rm.wr.BC--;\n") self.file_out.write("\t\tif (processor->Rm.wr.BC) {\n") self.file_out.write("\t\t\tprocessor->PC-=2;\n") self.file_out.write("\t\t\treturn ("+str(tst1)+");\n") self.file_out.write("\t\t} else {\n") self.file_out.write("\t\t\tZ80free_resFlag(processor,F_H|F_N|F_PV);\n") self.file_out.write("\t\ttmp1+=processor->Rm.br.A;\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_3,tmp1&0x08);\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_5,tmp1&0x02);\n") self.file_out.write("\t\t\treturn ("+str(tst2)+");\n") self.file_out.write("\t\t}\n") return True if opcode=="CPDR": self.file_out.write("\t\ttmp2=F_C&processor->Rm.br.F;\n") self.file_out.write("\t\tZ80free_doArithmetic(processor,processor->Rm.br.A,Z80free_Rd(processor->Rm.wr.HL--),0,1);\n") self.file_out.write("\t\tprocessor->Rm.wr.BC--;\n") self.file_out.write("\t\tif ((processor->Rm.wr.BC)&&(!(processor->Rm.br.F&F_Z))) {\n") self.file_out.write("\t\t\tprocessor->PC-=2;\n") self.file_out.write("\t\t\treturn ("+str(tst1)+");\n") self.file_out.write("\t\t} else {\n") self.file_out.write("\t\t\tZ80free_resFlag(processor,F_H|F_PV|F_3|F_5);\n") self.file_out.write("\t\t\tZ80free_setFlag(processor,F_N);\n") self.file_out.write("\t\tZ80free_valFlag(processor,F_C,tmp2);\n") self.file_out.write("\t\tif (processor->Rm.wr.BC)\n") self.file_out.write("\t\t\tZ80free_setFlag(processor,F_PV);") self.file_out.write("\t\t\treturn ("+str(tst2)+");\n") self.file_out.write("\t\t}\n") return True if opcode=="INDR": self.file_out.write("\t\tZ80free_Wr(processor->Rm.wr.HL,Z80free_In(processor->Rm.wr.BC));\n") self.file_out.write("\t\tprocessor->Rm.br.B=Z80free_doIncDec(processor,processor->Rm.br.B,1);\n") self.file_out.write("\t\tprocessor->Rm.wr.HL--;\n") self.file_out.write("\t\tif (processor->Rm.br.B) {\n") 
self.file_out.write("\t\t\tprocessor->PC-=2;\n") self.file_out.write("\t\t\treturn ("+str(tst1)+");\n") self.file_out.write("\t\t} else {\n") self.file_out.write("\t\t\treturn ("+str(tst2)+");\n") self.file_out.write("\t\t}\n") return True if opcode=="OTDR": self.file_out.write("\t\tprocessor->Rm.br.B=Z80free_doIncDec(processor,processor->Rm.br.B,1);\n") self.file_out.write("\t\tZ80free_Out(processor->Rm.wr.BC,Z80free_Rd(processor->Rm.wr.HL));\n") self.file_out.write("\t\tprocessor->Rm.wr.HL--;\n") self.file_out.write("\t\tif (processor->Rm.br.B) {\n") self.file_out.write("\t\t\tprocessor->PC-=2;\n") self.file_out.write("\t\t\treturn ("+str(tst1)+");\n") self.file_out.write("\t\t} else {\n") self.file_out.write("\t\t\treturn ("+str(tst2)+");\n") self.file_out.write("\t\t}\n") return True if opcode=="RLC": self.file_out.write("\t\t"+code_write[0].replace("@DATA@","Z80free_doRLC(processor,1,"+code_read[0]+")")+"\n") return False if opcode=="LD_RLC": self.file_out.write("\t\ttmp1=Z80free_doRLC(processor,1,"+code_read[1]+");\n") self.file_out.write("\t\t"+code_write[0].replace("@DATA@","tmp1")+"\n") self.file_out.write("\t\t"+code_write[1].replace("@DATA@","tmp1")+"\n") return False if opcode=="RRC": self.file_out.write("\t\t"+code_write[0].replace("@DATA@","Z80free_doRRC(processor,1,"+code_read[0]+")")+"\n") return False if opcode=="LD_RRC": self.file_out.write("\t\ttmp1=Z80free_doRRC(processor,1,"+code_read[1]+");\n") self.file_out.write("\t\t"+code_write[0].replace("@DATA@","tmp1")+"\n") self.file_out.write("\t\t"+code_write[1].replace("@DATA@","tmp1")+"\n") return False if opcode=="RL": self.file_out.write("\t\t"+code_write[0].replace("@DATA@","Z80free_doRL(processor,1,"+code_read[0]+")")+"\n") return False if opcode=="LD_RL": self.file_out.write("\t\ttmp1=Z80free_doRL(processor,1,"+code_read[1]+");\n") self.file_out.write("\t\t"+code_write[0].replace("@DATA@","tmp1")+"\n") self.file_out.write("\t\t"+code_write[1].replace("@DATA@","tmp1")+"\n") return False if 
opcode=="RR": self.file_out.write("\t\t"+code_write[0].replace("@DATA@","Z80free_doRR(processor,1,"+code_read[0]+")")+"\n") return False if opcode=="LD_RR": self.file_out.write("\t\ttmp1=Z80free_doRR(processor,1,"+code_read[1]+");\n") self.file_out.write("\t\t"+code_write[0].replace("@DATA@","tmp1")+"\n") self.file_out.write("\t\t"+code_write[1].replace("@DATA@","tmp1")+"\n") return False if opcode=="SLA": self.file_out.write("\t\t"+code_write[0].replace("@DATA@","Z80free_doSL(processor,0,"+code_read[0]+")")+"\n") return False if opcode=="LD_SLA": self.file_out.write("\t\ttmp1=Z80free_doSL(processor,0,"+code_read[1]+");\n") self.file_out.write("\t\t"+code_write[0].replace("@DATA@","tmp1")+"\n") self.file_out.write("\t\t"+code_write[1].replace("@DATA@","tmp1")+"\n") return False if opcode=="SRA": self.file_out.write("\t\t"+code_write[0].replace("@DATA@","Z80free_doSR(processor,1,"+code_read[0]+")")+"\n") return False if opcode=="LD_SRA": self.file_out.write("\t\ttmp1=Z80free_doSR(processor,1,"+code_read[1]+");\n") self.file_out.write("\t\t"+code_write[0].replace("@DATA@","tmp1")+"\n") self.file_out.write("\t\t"+code_write[1].replace("@DATA@","tmp1")+"\n") return False if opcode=="SLL": self.file_out.write("\t\t"+code_write[0].replace("@DATA@","Z80free_doSL(processor,1,"+code_read[0]+")")+"\n") return False if opcode=="LD_SLL": self.file_out.write("\t\ttmp1=Z80free_doSL(processor,1,"+code_read[1]+");\n") self.file_out.write("\t\t"+code_write[0].replace("@DATA@","tmp1")+"\n") self.file_out.write("\t\t"+code_write[1].replace("@DATA@","tmp1")+"\n") return False if opcode=="SRL": self.file_out.write("\t\t"+code_write[0].replace("@DATA@","Z80free_doSR(processor,0,"+code_read[0]+")")+"\n") return False if opcode=="LD_SRL": self.file_out.write("\t\ttmp1=Z80free_doSR(processor,0,"+code_read[1]+");\n") self.file_out.write("\t\t"+code_write[0].replace("@DATA@","tmp1")+"\n") self.file_out.write("\t\t"+code_write[1].replace("@DATA@","tmp1")+"\n") return False if opcode=="BIT": 
self.file_out.write("\t\tZ80free_doBIT(processor,"+code_read[0]+","+code_read[1]+");\n") return False if opcode=="RES": self.file_out.write("\t\t"+code_write[1].replace("@DATA@","Z80free_doSetRes(processor,0,"+code_read[0]+","+code_read[1]+")")+"\n") return False if opcode=="LD_RES": self.file_out.write("\t\ttmp1=Z80free_doSetRes(processor,0,"+code_read[1]+","+code_read[2]+");\n") self.file_out.write("\t\t"+code_write[2].replace("@DATA@","tmp1")+"\n") self.file_out.write("\t\t"+code_write[0].replace("@DATA@","tmp1")+"\n") return False if opcode=="SET": self.file_out.write("\t\t"+code_write[1].replace("@DATA@","Z80free_doSetRes(processor,1,"+code_read[0]+","+code_read[1]+")")+"\n") return False if opcode=="LD_SET": self.file_out.write("\t\ttmp1=Z80free_doSetRes(processor,1,"+code_read[1]+","+code_read[2]+");\n") self.file_out.write("\t\t"+code_write[2].replace("@DATA@","tmp1")+"\n") self.file_out.write("\t\t"+code_write[0].replace("@DATA@","tmp1")+"\n") return False if opcode=="DEFAULT": self.file_out.write("\t\treturn (Z80free_codes(processor,opcode));\n") return True self.unknown_opcode() class parser_XX(z80_parser): def __init__(self,filename): self.codes="XX" z80_parser.__init__(self,filename) def write_start_code(self): """ Writes the first part of the C code for the file """ self.file_out.write('#include "Z80free.h"\n\n') self.file_out.write('int '+self.filename+' (Z80FREE *processor,byte opcode) {\n') self.file_out.write('\tstatic byte tmp1;\n') self.file_out.write('\tstatic word tmp2;\n') self.file_out.write('\n') def write_end_code(self): """ writes the last part of the C code for the file """ self.file_out.write('\treturn -1;\n') self.file_out.write('}\n') class parser_CB(z80_parser): def __init__(self,filename): self.codes="CB" z80_parser.__init__(self,filename) def write_start_code(self): """ Writes the first part of the C code for the file """ self.file_out.write('#include "Z80free.h"\n\n') self.file_out.write('int '+self.filename+' (Z80FREE 
*processor,byte opcode) {\n') #self.file_out.write('\tstatic byte tmp1;\n') #self.file_out.write('\tstatic word tmp2;\n') self.file_out.write('\n') def write_end_code(self): """ writes the last part of the C code for the file """ self.file_out.write('\treturn -1;\n') self.file_out.write('}\n') class parser_ED(z80_parser): def __init__(self,filename): self.codes="ED" z80_parser.__init__(self,filename) def write_start_code(self): """ Writes the first part of the C code for the file """ self.file_out.write('#include "Z80free.h"\n\n') self.file_out.write('int '+self.filename+' (Z80FREE *processor,byte opcode) {\n') self.file_out.write('\tstatic byte tmp1;\n') self.file_out.write('\tstatic byte tmp2;\n') self.file_out.write('\n') def write_end_code(self): """ writes the last part of the C code for the file """ self.file_out.write('\treturn -1;\n') self.file_out.write('}\n') class parser_DD(z80_parser): def __init__(self,filename): self.codes="DD" z80_parser.__init__(self,filename) def write_start_code(self): """ Writes the first part of the C code for the file """ self.file_out.write('#include "Z80free.h"\n\n') self.file_out.write('int '+self.filename+' (Z80FREE *processor,byte opcode) {\n') self.file_out.write('\tstatic byte tmp1;\n') self.file_out.write('\tstatic word tmp2;\n') self.file_out.write('\n') def write_end_code(self): """ writes the last part of the C code for the file """ self.file_out.write('\treturn -1;\n') self.file_out.write('}\n') class parser_FD(z80_parser): def __init__(self,filename): self.codes="FD" z80_parser.__init__(self, filename) def write_start_code(self): """ Writes the first part of the C code for the file """ self.file_out.write('#include "Z80free.h"\n\n') self.file_out.write('int '+self.filename+' (Z80FREE *processor,byte opcode) {\n') self.file_out.write('\tstatic byte tmp1;\n') self.file_out.write('\tstatic word tmp2;\n') self.file_out.write('\n') def write_end_code(self): """ writes the last part of the C code for the file """ 
self.file_out.write('\treturn -1;\n') self.file_out.write('}\n') class parser_DDCB(z80_parser): def __init__(self,filename): self.codes="DDCB" z80_parser.__init__(self, filename) def write_start_code(self): """ Writes the first part of the C code for the file """ self.file_out.write('#include "Z80free.h"\n\n') self.file_out.write('int '+self.filename+' (Z80FREE *processor,byte d1) {\n') self.file_out.write('\tstatic byte tmp1;\n') #self.file_out.write('\tstatic word tmp2;\n') self.file_out.write('\tstatic byte opcode;\n') self.file_out.write('\topcode=Z80free_Rd(processor->PC++);\n') self.file_out.write('\n') def write_end_code(self): """ writes the last part of the C code for the file """ self.file_out.write('\treturn -1;\n') self.file_out.write('}\n') class parser_FDCB(z80_parser): def __init__(self,filename): self.codes="FDCB" z80_parser.__init__(self, filename) def write_start_code(self): """ Writes the first part of the C code for the file """ self.file_out.write('#include "Z80free.h"\n\n') self.file_out.write('int '+self.filename+' (Z80FREE *processor,byte d1) {\n') self.file_out.write('\tstatic byte tmp1;\n') #self.file_out.write('\tstatic word tmp2;\n') self.file_out.write('\tstatic byte opcode;\n') self.file_out.write('\topcode=Z80free_Rd(processor->PC++);\n') self.file_out.write('\n') def write_end_code(self): """ writes the last part of the C code for the file """ self.file_out.write('\treturn -1;\n') self.file_out.write('}\n') aparser=parser_XX("Z80free_codes") aparser=parser_CB("Z80free_codesCB") aparser=parser_DD("Z80free_codesDD") aparser=parser_DDCB("Z80free_codesDDCB") aparser=parser_ED("Z80free_codesED") aparser=parser_FD("Z80free_codesFD") aparser=parser_FDCB("Z80free_codesFDCB")
gpl-3.0
safwanrahman/kuma
kuma/wiki/management/commands/generate_sphinx_template.py
3
2377
import datetime from django.conf import settings from django.core.management.base import NoArgsCommand from django.shortcuts import render from django.test import RequestFactory, override_settings from django.utils import translation from html5lib import constants as html5lib_constants from kuma.wiki.content import parse class Command(NoArgsCommand): def handle(self, *args, **options): # Not ideal, but we need to temporarily remove inline elements as a # void/ignored element # TO DO: Can this clone code be shortened? new_void_set = set() for item in html5lib_constants.voidElements: new_void_set.add(item) new_void_set.remove('link') new_void_set.remove('img') html5lib_constants.voidElements = frozenset(new_void_set) # Create a mock request for the sake of rendering the template request = RequestFactory().get('/') request.LANGUAGE_CODE = settings.LANGUAGE_CODE # for Jinja2 translation.activate(settings.LANGUAGE_CODE) # for context var LANG host = 'developer.mozilla.org' request.META['SERVER_NAME'] = host this_year = datetime.date.today().year # Load the page with sphinx template with override_settings( ALLOWED_HOSTS=[host], SITE_URL=settings.PRODUCTION_URL, DEBUG=False): response = render(request, 'wiki/sphinx.html', {'is_sphinx': True, 'this_year': this_year}) content = response.content # Use a filter to make links absolute tool = parse(content, is_full_document=True) content = tool.absolutizeAddresses( base_url=settings.PRODUCTION_URL, tag_attributes={ 'a': 'href', 'img': 'src', 'form': 'action', 'link': 'href', 'script': 'src' }).serialize() # Make in-comment script src absolute for IE content = content.replace('src="/static/', 'src="%s/static/' % settings.PRODUCTION_URL) # Fix missing DOCTYPE assert content.startswith("<html") content = u"<!DOCTYPE html>\n" + content # Output the response print content.encode('utf8')
mpl-2.0
pipermerriam/web3.py
tests/core/contracts/conftest.py
1
41212
import pytest import json from eth_utils import ( event_signature_to_log_topic, ) CONTRACT_CODE = "0x606060405261022e806100126000396000f360606040523615610074576000357c01000000000000000000000000000000000000000000000000000000009004806316216f391461007657806361bc221a146100995780637cf5dab0146100bc578063a5f3c23b146100e8578063d09de08a1461011d578063dcf537b11461014057610074565b005b610083600480505061016c565b6040518082815260200191505060405180910390f35b6100a6600480505061017f565b6040518082815260200191505060405180910390f35b6100d26004808035906020019091905050610188565b6040518082815260200191505060405180910390f35b61010760048080359060200190919080359060200190919050506101ea565b6040518082815260200191505060405180910390f35b61012a6004805050610201565b6040518082815260200191505060405180910390f35b6101566004808035906020019091905050610217565b6040518082815260200191505060405180910390f35b6000600d9050805080905061017c565b90565b60006000505481565b6000816000600082828250540192505081905550600060005054905080507f3496c3ede4ec3ab3686712aa1c238593ea6a42df83f98a5ec7df9834cfa577c5816040518082815260200191505060405180910390a18090506101e5565b919050565b6000818301905080508090506101fb565b92915050565b600061020d6001610188565b9050610214565b90565b60006007820290508050809050610229565b91905056" # noqa: E501 CONTRACT_RUNTIME = 
"0x60606040523615610074576000357c01000000000000000000000000000000000000000000000000000000009004806316216f391461007657806361bc221a146100995780637cf5dab0146100bc578063a5f3c23b146100e8578063d09de08a1461011d578063dcf537b11461014057610074565b005b610083600480505061016c565b6040518082815260200191505060405180910390f35b6100a6600480505061017f565b6040518082815260200191505060405180910390f35b6100d26004808035906020019091905050610188565b6040518082815260200191505060405180910390f35b61010760048080359060200190919080359060200190919050506101ea565b6040518082815260200191505060405180910390f35b61012a6004805050610201565b6040518082815260200191505060405180910390f35b6101566004808035906020019091905050610217565b6040518082815260200191505060405180910390f35b6000600d9050805080905061017c565b90565b60006000505481565b6000816000600082828250540192505081905550600060005054905080507f3496c3ede4ec3ab3686712aa1c238593ea6a42df83f98a5ec7df9834cfa577c5816040518082815260200191505060405180910390a18090506101e5565b919050565b6000818301905080508090506101fb565b92915050565b600061020d6001610188565b9050610214565b90565b60006007820290508050809050610229565b91905056" # noqa: E501 CONTRACT_ABI = 
json.loads('[{"constant":false,"inputs":[],"name":"return13","outputs":[{"name":"result","type":"int256"}],"type":"function"},{"constant":true,"inputs":[],"name":"counter","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":false,"inputs":[{"name":"amt","type":"uint256"}],"name":"increment","outputs":[{"name":"result","type":"uint256"}],"type":"function"},{"constant":false,"inputs":[{"name":"a","type":"int256"},{"name":"b","type":"int256"}],"name":"add","outputs":[{"name":"result","type":"int256"}],"type":"function"},{"constant":false,"inputs":[],"name":"increment","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":false,"inputs":[{"name":"a","type":"int256"}],"name":"multiply7","outputs":[{"name":"result","type":"int256"}],"type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"value","type":"uint256"}],"name":"Increased","type":"event"}]') # noqa: E501 @pytest.fixture(scope="session") def MATH_CODE(): return CONTRACT_CODE @pytest.fixture(scope="session") def MATH_RUNTIME(): return CONTRACT_RUNTIME @pytest.fixture(scope="session") def MATH_ABI(): return CONTRACT_ABI @pytest.fixture() def MathContract(web3, MATH_ABI, MATH_CODE, MATH_RUNTIME): return web3.eth.contract( abi=MATH_ABI, bytecode=MATH_CODE, bytecode_runtime=MATH_RUNTIME, ) CONTRACT_SIMPLE_CONSTRUCTOR_CODE = '0x60606040526003600055602c8060156000396000f3606060405260e060020a600035046373d4a13a8114601a575b005b602260005481565b6060908152602090f3' # noqa: E501 CONTRACT_SIMPLE_CONSTRUCTOR_RUNTIME = '0x606060405260e060020a600035046373d4a13a8114601a575b005b602260005481565b6060908152602090f3' # noqa: E501 CONTRACT_SIMPLE_CONSTRUCTOR_ABI = json.loads('[{"constant":true,"inputs":[],"name":"data","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"inputs":[],"type":"constructor"}]') # noqa: E501 @pytest.fixture(scope="session") def SIMPLE_CONSTRUCTOR_CODE(): return CONTRACT_SIMPLE_CONSTRUCTOR_CODE @pytest.fixture(scope="session") def 
SIMPLE_CONSTRUCTOR_RUNTIME(): return CONTRACT_SIMPLE_CONSTRUCTOR_RUNTIME @pytest.fixture(scope="session") def SIMPLE_CONSTRUCTOR_ABI(): return CONTRACT_SIMPLE_CONSTRUCTOR_ABI @pytest.fixture() def SimpleConstructorContract(web3, SIMPLE_CONSTRUCTOR_CODE, SIMPLE_CONSTRUCTOR_RUNTIME, SIMPLE_CONSTRUCTOR_ABI): return web3.eth.contract( abi=SIMPLE_CONSTRUCTOR_ABI, bytecode=SIMPLE_CONSTRUCTOR_CODE, bytecode_runtime=SIMPLE_CONSTRUCTOR_RUNTIME, ) CONTRACT_WITH_CONSTRUCTOR_ARGUMENTS_CODE = "0x60606040818152806066833960a09052516080516000918255600155603e908190602890396000f3606060405260e060020a600035046388ec134681146024578063d4c46c7614602c575b005b603460005481565b603460015481565b6060908152602090f3" # noqa: E501 CONTRACT_WITH_CONSTRUCTOR_ARGUMENTS_RUNTIME = "0x606060405260e060020a600035046388ec134681146024578063d4c46c7614602c575b005b603460005481565b603460015481565b6060908152602090f3" # noqa: E501 CONTRACT_WITH_CONSTRUCTOR_ARGUMENTS_ABI = json.loads('[{"constant":true,"inputs":[],"name":"data_a","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[],"name":"data_b","outputs":[{"name":"","type":"bytes32"}],"type":"function"},{"inputs":[{"name":"a","type":"uint256"},{"name":"b","type":"bytes32"}],"type":"constructor"}]') # noqa: E501 @pytest.fixture() def WITH_CONSTRUCTOR_ARGUMENTS_CODE(): return CONTRACT_WITH_CONSTRUCTOR_ARGUMENTS_CODE @pytest.fixture() def WITH_CONSTRUCTOR_ARGUMENTS_RUNTIME(): return CONTRACT_WITH_CONSTRUCTOR_ARGUMENTS_RUNTIME @pytest.fixture() def WITH_CONSTRUCTOR_ARGUMENTS_ABI(): return CONTRACT_WITH_CONSTRUCTOR_ARGUMENTS_ABI @pytest.fixture() def WithConstructorArgumentsContract(web3, WITH_CONSTRUCTOR_ARGUMENTS_CODE, WITH_CONSTRUCTOR_ARGUMENTS_RUNTIME, WITH_CONSTRUCTOR_ARGUMENTS_ABI): return web3.eth.contract( abi=WITH_CONSTRUCTOR_ARGUMENTS_ABI, bytecode=WITH_CONSTRUCTOR_ARGUMENTS_CODE, bytecode_runtime=WITH_CONSTRUCTOR_ARGUMENTS_RUNTIME, ) CONTRACT_WITH_CONSTRUCTOR_ADDRESS_CODE = 
"0x6060604052604051602080607683395060806040525160008054600160a060020a031916821790555060428060346000396000f3606060405260e060020a600035046334664e3a8114601a575b005b603860005473ffffffffffffffffffffffffffffffffffffffff1681565b6060908152602090f3" # noqa: E501 CONTRACT_WITH_CONSTRUCTOR_ADDRESS_RUNTIME = "0x606060405260e060020a600035046334664e3a8114601a575b005b603860005473ffffffffffffffffffffffffffffffffffffffff1681565b6060908152602090f3" # noqa: E501 CONTRACT_WITH_CONSTRUCTOR_ADDRESS_ABI = json.loads('[{"constant":true,"inputs":[],"name":"testAddr","outputs":[{"name":"","type":"address"}],"type":"function"},{"inputs":[{"name":"_testAddr","type":"address"}],"type":"constructor"}]') # noqa: E501 @pytest.fixture() def WITH_CONSTRUCTOR_ADDRESS_CODE(): return CONTRACT_WITH_CONSTRUCTOR_ADDRESS_CODE @pytest.fixture() def WITH_CONSTRUCTOR_ADDRESS_RUNTIME(): return CONTRACT_WITH_CONSTRUCTOR_ADDRESS_RUNTIME @pytest.fixture() def WITH_CONSTRUCTOR_ADDRESS_ABI(): return CONTRACT_WITH_CONSTRUCTOR_ADDRESS_ABI @pytest.fixture() def WithConstructorAddressArgumentsContract(web3, WITH_CONSTRUCTOR_ADDRESS_CODE, WITH_CONSTRUCTOR_ADDRESS_RUNTIME, WITH_CONSTRUCTOR_ADDRESS_ABI): return web3.eth.contract( abi=WITH_CONSTRUCTOR_ADDRESS_ABI, bytecode=WITH_CONSTRUCTOR_ADDRESS_CODE, bytecode_runtime=WITH_CONSTRUCTOR_ADDRESS_RUNTIME, ) @pytest.fixture() def AddressReflectorContract(web3): return web3.eth.contract( abi=json.loads('[{"constant":true,"inputs":[{"name":"arg","type":"address"}],"name":"reflect","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"pure","type":"function"},{"constant":true,"inputs":[{"name":"arg","type":"address[]"}],"name":"reflect","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"pure","type":"function"}]'), # noqa: 501 
bytecode="6060604052341561000f57600080fd5b6101ca8061001e6000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680630b816c1614610048578063c04d11fc146100c157600080fd5b341561005357600080fd5b61007f600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610170565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156100cc57600080fd5b61011960048080359060200190820180359060200190808060200260200160405190810160405280939291908181526020018383602002808284378201915050505050509190505061017a565b6040518080602001828103825283818151815260200191508051906020019060200280838360005b8381101561015c578082015181840152602081019050610141565b505050509050019250505060405180910390f35b6000819050919050565b61018261018a565b819050919050565b6020604051908101604052806000815250905600a165627a7a723058206b15d98a803b91327d94f943e9712291539701b2f7370e10f5873633941484930029", # noqa: 501 bytecode_runtime="60606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680630b816c1614610048578063c04d11fc146100c157600080fd5b341561005357600080fd5b61007f600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610170565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156100cc57600080fd5b61011960048080359060200190820180359060200190808060200260200160405190810160405280939291908181526020018383602002808284378201915050505050509190505061017a565b6040518080602001828103825283818151815260200191508051906020019060200280838360005b8381101561015c578082015181840152602081019050610141565b505050509050019250505060405180910390f35b6000819050919050565b61018261018a565b819050919050565b6020604051908101604052806000815250905600a165627a7a723058206b15d98a803b91327d94f943e9712291539701b2f7370e10f5873633941484930029", # noqa: 501 ) CONTRACT_STRING_CODE = 
"0x6060604052604051610496380380610496833981016040528051018060006000509080519060200190828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f10608d57805160ff19168380011785555b50607c9291505b8082111560ba57838155600101606b565b5050506103d8806100be6000396000f35b828001600101855582156064579182015b828111156064578251826000505591602001919060010190609e565b509056606060405260e060020a600035046320965255811461003c57806330de3cee1461009f5780633fa4f245146100c457806393a0935214610121575b005b6101c7600060608181528154602060026001831615610100026000190190921691909104601f810182900490910260a0908101604052608082815292939190828280156102605780601f1061023557610100808354040283529160200191610260565b6101c7600060609081526101a06040526101006080818152906102d860a03990505b90565b6101c760008054602060026001831615610100026000190190921691909104601f810182900490910260809081016040526060828152929190828280156102975780601f1061026c57610100808354040283529160200191610297565b60206004803580820135601f81018490049093026080908101604052606084815261003a946024939192918401918190838280828437509496505050505050508060006000509080519060200190828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061029f57805160ff19168380011785555b506102cf9291505b808211156102d4578381556001016101b4565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156102275780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b820191906000526020600020905b81548152906001019060200180831161024357829003601f168201915b505050505090506100c1565b820191906000526020600020905b81548152906001019060200180831161027a57829003601f168201915b505050505081565b828001600101855582156101ac579182015b828111156101ac5782518260005055916020019190600101906102b1565b505050565b509056000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f5
05152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff" # noqa: E501 CONTRACT_STRING_RUNTIME = "0x606060405260e060020a600035046320965255811461003c57806330de3cee1461009f5780633fa4f245146100c457806393a0935214610121575b005b6101c7600060608181528154602060026001831615610100026000190190921691909104601f810182900490910260a0908101604052608082815292939190828280156102605780601f1061023557610100808354040283529160200191610260565b6101c7600060609081526101a06040526101006080818152906102d860a03990505b90565b6101c760008054602060026001831615610100026000190190921691909104601f810182900490910260809081016040526060828152929190828280156102975780601f1061026c57610100808354040283529160200191610297565b60206004803580820135601f81018490049093026080908101604052606084815261003a946024939192918401918190838280828437509496505050505050508060006000509080519060200190828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061029f57805160ff19168380011785555b506102cf9291505b808211156102d4578381556001016101b4565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156102275780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b820191906000526020600020905b81548152906001019060200180831161024357829003601f168201915b505050505090506100c1565b820191906000526020600020905b81548152906001019060200180831161027a57829003601f168201915b505050505081565b828001600101855582156101ac579182015b828111156101ac5782518260005055916020019190600101906102b1565b505050565b509056000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494
a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff" # noqa: E501 CONTRACT_STRING_ABI = json.loads('[{"constant":false,"inputs":[],"name":"getValue","outputs":[{"name":"","type":"string"}],"type":"function"},{"constant":false,"inputs":[],"name":"constValue","outputs":[{"name":"","type":"string"}],"type":"function"},{"constant":true,"inputs":[],"name":"value","outputs":[{"name":"","type":"string"}],"type":"function"},{"constant":false,"inputs":[{"name":"_value","type":"string"}],"name":"setValue","outputs":[],"type":"function"},{"inputs":[{"name":"_value","type":"string"}],"type":"constructor"}]') # noqa: E501 @pytest.fixture() def STRING_CODE(): return CONTRACT_STRING_CODE @pytest.fixture() def STRING_RUNTIME(): return CONTRACT_STRING_RUNTIME @pytest.fixture() def STRING_ABI(): return CONTRACT_STRING_ABI @pytest.fixture() def STRING_CONTRACT(STRING_CODE, STRING_RUNTIME, STRING_ABI): return { 'bytecode': STRING_CODE, 'bytecode_runtime': STRING_RUNTIME, 'abi': STRING_ABI, } @pytest.fixture() def StringContract(web3, STRING_CONTRACT): return web3.eth.contract(**STRING_CONTRACT) CONTRACT_BYTES_CODE = 
"60606040526040805190810160405280600281526020017f01230000000000000000000000000000000000000000000000000000000000008152506000908051906020019061004f929190610096565b50341561005b57600080fd5b604051610723380380610723833981016040528080518201919050505b806001908051906020019061008e929190610116565b505b506101bb565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106100d757805160ff1916838001178555610105565b82800160010185558215610105579182015b828111156101045782518255916020019190600101906100e9565b5b5090506101129190610196565b5090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061015757805160ff1916838001178555610185565b82800160010185558215610185579182015b82811115610184578251825591602001919060010190610169565b5b5090506101929190610196565b5090565b6101b891905b808211156101b457600081600090555060010161019c565b5090565b90565b610559806101ca6000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063209652551461005f57806330de3cee146100ee5780633fa4f2451461017d578063439970aa1461020c575b600080fd5b341561006a57600080fd5b610072610269565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100b35780820151818401525b602081019050610097565b50505050905090810190601f1680156100e05780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34156100f957600080fd5b610101610312565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101425780820151818401525b602081019050610126565b50505050905090810190601f16801561016f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561018857600080fd5b6101906103bb565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101d15780820151818401525b6020810190506101b5565b50505050905090810190601f1680156101fe5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561021757600080fd5b61026760048
0803590602001908201803590602001908080601f01602080910402602001604051908101604052809392919081815260200183838082843782019150505050505091905050610459565b005b610271610474565b60018054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156103075780601f106102dc57610100808354040283529160200191610307565b820191906000526020600020905b8154815290600101906020018083116102ea57829003601f168201915b505050505090505b90565b61031a610474565b60008054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156103b05780601f10610385576101008083540402835291602001916103b0565b820191906000526020600020905b81548152906001019060200180831161039357829003601f168201915b505050505090505b90565b60018054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156104515780601f1061042657610100808354040283529160200191610451565b820191906000526020600020905b81548152906001019060200180831161043457829003601f168201915b505050505081565b806001908051906020019061046f929190610488565b505b50565b602060405190810160405280600081525090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106104c957805160ff19168380011785556104f7565b828001600101855582156104f7579182015b828111156104f65782518255916020019190600101906104db565b5b5090506105049190610508565b5090565b61052a91905b8082111561052657600081600090555060010161050e565b5090565b905600a165627a7a723058203ff916ee91add6247b20793745d1c6a8d8dcaa49d8c84fbbabb5c966fd9b6fc90029" # noqa: E501 CONTRACT_BYTES_RUNTIME = 
"60606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063209652551461005f57806330de3cee146100ee5780633fa4f2451461017d578063439970aa1461020c575b600080fd5b341561006a57600080fd5b610072610269565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100b35780820151818401525b602081019050610097565b50505050905090810190601f1680156100e05780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34156100f957600080fd5b610101610312565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101425780820151818401525b602081019050610126565b50505050905090810190601f16801561016f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561018857600080fd5b6101906103bb565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101d15780820151818401525b6020810190506101b5565b50505050905090810190601f1680156101fe5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561021757600080fd5b610267600480803590602001908201803590602001908080601f01602080910402602001604051908101604052809392919081815260200183838082843782019150505050505091905050610459565b005b610271610474565b60018054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156103075780601f106102dc57610100808354040283529160200191610307565b820191906000526020600020905b8154815290600101906020018083116102ea57829003601f168201915b505050505090505b90565b61031a610474565b60008054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156103b05780601f10610385576101008083540402835291602001916103b0565b820191906000526020600020905b81548152906001019060200180831161039357829003601f168201915b505050505090505b90565b60018054600181600116156101000203166002900480601f016020809104026020016040519081016040528
0929190818152602001828054600181600116156101000203166002900480156104515780601f1061042657610100808354040283529160200191610451565b820191906000526020600020905b81548152906001019060200180831161043457829003601f168201915b505050505081565b806001908051906020019061046f929190610488565b505b50565b602060405190810160405280600081525090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106104c957805160ff19168380011785556104f7565b828001600101855582156104f7579182015b828111156104f65782518255916020019190600101906104db565b5b5090506105049190610508565b5090565b61052a91905b8082111561052657600081600090555060010161050e565b5090565b905600a165627a7a723058203ff916ee91add6247b20793745d1c6a8d8dcaa49d8c84fbbabb5c966fd9b6fc90029" # noqa: E501 CONTRACT_BYTES_ABI = json.loads('[{"constant":false,"inputs":[],"name":"getValue","outputs":[{"name":"","type":"bytes"}],"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"constValue","outputs":[{"name":"","type":"bytes"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"value","outputs":[{"name":"","type":"bytes"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_value","type":"bytes"}],"name":"setValue","outputs":[],"payable":false,"type":"function"},{"inputs":[{"name":"_value","type":"bytes"}],"payable":false,"type":"constructor"}]') # noqa: E501 @pytest.fixture() def BYTES_CODE(): return CONTRACT_BYTES_CODE @pytest.fixture() def BYTES_RUNTIME(): return CONTRACT_BYTES_RUNTIME @pytest.fixture() def BYTES_ABI(): return CONTRACT_BYTES_ABI @pytest.fixture() def BYTES_CONTRACT(BYTES_CODE, BYTES_RUNTIME, BYTES_ABI): return { 'bytecode': BYTES_CODE, 'bytecode_runtime': BYTES_RUNTIME, 'abi': BYTES_ABI, } @pytest.fixture() def BytesContract(web3, BYTES_CONTRACT): return web3.eth.contract(**BYTES_CONTRACT) CONTRACT_BYTES32_CODE = 
"60606040527f0123012301230123012301230123012301230123012301230123012301230123600090600019169055341561003957600080fd5b6040516020806101e2833981016040528080519060200190919050505b80600181600019169055505b505b61016f806100736000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063209652551461005f57806330de3cee146100905780633fa4f245146100c157806358825b10146100f2575b600080fd5b341561006a57600080fd5b610072610119565b60405180826000191660001916815260200191505060405180910390f35b341561009b57600080fd5b6100a3610124565b60405180826000191660001916815260200191505060405180910390f35b34156100cc57600080fd5b6100d461012e565b60405180826000191660001916815260200191505060405180910390f35b34156100fd57600080fd5b610117600480803560001916906020019091905050610134565b005b600060015490505b90565b6000805490505b90565b60015481565b80600181600019169055505b505600a165627a7a7230582043b15c20378b1603d330561258ccf291d08923a4c25fa8af0d590a010a6322180029" # noqa: E501 CONTRACT_BYTES32_RUNTIME = "60606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063209652551461005f57806330de3cee146100905780633fa4f245146100c157806358825b10146100f2575b600080fd5b341561006a57600080fd5b610072610119565b60405180826000191660001916815260200191505060405180910390f35b341561009b57600080fd5b6100a3610124565b60405180826000191660001916815260200191505060405180910390f35b34156100cc57600080fd5b6100d461012e565b60405180826000191660001916815260200191505060405180910390f35b34156100fd57600080fd5b610117600480803560001916906020019091905050610134565b005b600060015490505b90565b6000805490505b90565b60015481565b80600181600019169055505b505600a165627a7a7230582043b15c20378b1603d330561258ccf291d08923a4c25fa8af0d590a010a6322180029" # noqa: E501 CONTRACT_BYTES32_ABI = 
json.loads('[{"constant":false,"inputs":[],"name":"getValue","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"constValue","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"value","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_value","type":"bytes32"}],"name":"setValue","outputs":[],"payable":false,"type":"function"},{"inputs":[{"name":"_value","type":"bytes32"}],"payable":false,"type":"constructor"}]') # noqa: E501 @pytest.fixture() def BYTES32_CODE(): return CONTRACT_BYTES32_CODE @pytest.fixture() def BYTES32_RUNTIME(): return CONTRACT_BYTES32_RUNTIME @pytest.fixture() def BYTES32_ABI(): return CONTRACT_BYTES32_ABI @pytest.fixture() def BYTES32_CONTRACT(BYTES32_CODE, BYTES32_RUNTIME, BYTES32_ABI): return { 'bytecode': BYTES32_CODE, 'bytecode_runtime': BYTES32_RUNTIME, 'abi': BYTES32_ABI, } @pytest.fixture() def Bytes32Contract(web3, BYTES32_CONTRACT): return web3.eth.contract(**BYTES32_CONTRACT) CONTRACT_EMITTER_CODE = 
"0x606060405234610000575b610772806100186000396000f36060604052361561006c5760e060020a60003504630bb563d6811461007157806317c0c180146100c657806320f0256e146100d857806390b41d8b146100f65780639c3770531461010e578063aa6fd82214610129578063acabb9ed1461013e578063e17bf956146101d0575b610000565b34610000576100c4600480803590602001908201803590602001908080601f0160208091040260200160405190810160405280939291908181526020018383808284375094965061022595505050505050565b005b34610000576100c46004356102b8565b005b34610000576100c460043560243560443560643560843561031e565b005b34610000576100c46004356024356044356103e0565b005b34610000576100c46004356024356044356064356104b3565b005b34610000576100c4600435602435610563565b005b34610000576100c4600480803590602001908201803590602001908080601f0160208091040260200160405190810160405280939291908181526020018383808284375050604080516020601f89358b0180359182018390048302840183019094528083529799988101979196509182019450925082915084018382808284375094965061061895505050505050565b005b34610000576100c4600480803590602001908201803590602001908080601f016020809104026020016040519081016040528093929190818152602001838380828437509496506106df95505050505050565b005b7fa95e6e2a182411e7a6f9ed114a85c3761d87f9b8f453d842c71235aa64fff99f8160405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156102a75780820380516001836020036101000a031916815260200191505b509250505060405180910390a15b50565b600181600b81116100005714156102f7576040517f1e86022f78f8d04f8e3dfd13a2bdb280403e6632877c0dbee5e4eeb259908a5c90600090a16102b5565b600081600b811161000057141561006c57604051600090a06102b5565b610000565b5b5b50565b600585600b811161000057141561037b5760408051858152602081018590528082018490526060810183905290517ff039d147f23fe975a4254bdf6b1502b8c79132ae1833986b7ccef2638e73fdf99181900360800190a16103d7565b600b85600b811161000057141561006c5780827fa30ece802b64cd2b7e57dabf4010aabf5df26d1556977affb07b98a77ad955b58686604051808381526020018281526020019250505060405180910390a36
103d7565b610000565b5b5b5050505050565b600383600b811161000057141561043157604080518381526020810183905281517fdf0cb1dea99afceb3ea698d62e705b736f1345a7eee9eb07e63d1f8f556c1bc5929181900390910190a16104ab565b600983600b811161000057141561047d5760408051838152905182917f057bc32826fbe161da1c110afcdcae7c109a8b69149f727fc37a603c60ef94ca919081900360200190a26104ab565b600883600b811161000057141561006c57604080518381529051829181900360200190a16104ab565b610000565b5b5b5b505050565b600484600b811161000057141561050957604080518481526020810184905280820183905290517f4a25b279c7c585f25eda9788ac9420ebadae78ca6b206a0e6ab488fd81f550629181900360600190a161055b565b600a84600b811161000057141561006c57604080518481529051829184917ff16c999b533366ca5138d78e85da51611089cd05749f098d6c225d4cd42ee6ec9181900360200190a361055b565b610000565b5b5b50505050565b600282600b81116100005714156105ac576040805182815290517f56d2ef3c5228bf5d88573621e325a4672ab50e033749a601e4f4a5e1dce905d49181900360200190a1610611565b600782600b81116100005714156105ed5760405181907ff70fe689e290d8ce2b2a388ac28db36fbb0e16a6d89c6804c461f65a1b40bb1590600090a2610611565b600682600b811161000057141561006c576040518190600090a1610611565b610000565b5b5b5b5050565b81604051808280519060200190808383829060006004602084601f0104600302600f01f15090500191505060405180910390207fe77cf33df73da7bc2e253a2dae617e6f15e4e337eaa462a108903af4643d1b758260405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156106cd5780820380516001836020036101000a031916815260200191505b509250505060405180910390a25b5050565b7f532fd6ea96cfb78bb46e09279a26828b8b493de1a2b8b1ee1face527978a15a58160405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156102a75780820380516001836020036101000a031916815260200191505b509250505060405180910390a15b5056" # noqa: E501 CONTRACT_EMITTER_RUNTIME = 
"0x6060604052361561006c5760e060020a60003504630bb563d6811461007157806317c0c180146100c657806320f0256e146100d857806390b41d8b146100f65780639c3770531461010e578063aa6fd82214610129578063acabb9ed1461013e578063e17bf956146101d0575b610000565b34610000576100c4600480803590602001908201803590602001908080601f0160208091040260200160405190810160405280939291908181526020018383808284375094965061022595505050505050565b005b34610000576100c46004356102b8565b005b34610000576100c460043560243560443560643560843561031e565b005b34610000576100c46004356024356044356103e0565b005b34610000576100c46004356024356044356064356104b3565b005b34610000576100c4600435602435610563565b005b34610000576100c4600480803590602001908201803590602001908080601f0160208091040260200160405190810160405280939291908181526020018383808284375050604080516020601f89358b0180359182018390048302840183019094528083529799988101979196509182019450925082915084018382808284375094965061061895505050505050565b005b34610000576100c4600480803590602001908201803590602001908080601f016020809104026020016040519081016040528093929190818152602001838380828437509496506106df95505050505050565b005b7fa95e6e2a182411e7a6f9ed114a85c3761d87f9b8f453d842c71235aa64fff99f8160405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156102a75780820380516001836020036101000a031916815260200191505b509250505060405180910390a15b50565b600181600b81116100005714156102f7576040517f1e86022f78f8d04f8e3dfd13a2bdb280403e6632877c0dbee5e4eeb259908a5c90600090a16102b5565b600081600b811161000057141561006c57604051600090a06102b5565b610000565b5b5b50565b600585600b811161000057141561037b5760408051858152602081018590528082018490526060810183905290517ff039d147f23fe975a4254bdf6b1502b8c79132ae1833986b7ccef2638e73fdf99181900360800190a16103d7565b600b85600b811161000057141561006c5780827fa30ece802b64cd2b7e57dabf4010aabf5df26d1556977affb07b98a77ad955b58686604051808381526020018281526020019250505060405180910390a36103d7565b610000565b5b5b5050505050565b600383600b8
11161000057141561043157604080518381526020810183905281517fdf0cb1dea99afceb3ea698d62e705b736f1345a7eee9eb07e63d1f8f556c1bc5929181900390910190a16104ab565b600983600b811161000057141561047d5760408051838152905182917f057bc32826fbe161da1c110afcdcae7c109a8b69149f727fc37a603c60ef94ca919081900360200190a26104ab565b600883600b811161000057141561006c57604080518381529051829181900360200190a16104ab565b610000565b5b5b5b505050565b600484600b811161000057141561050957604080518481526020810184905280820183905290517f4a25b279c7c585f25eda9788ac9420ebadae78ca6b206a0e6ab488fd81f550629181900360600190a161055b565b600a84600b811161000057141561006c57604080518481529051829184917ff16c999b533366ca5138d78e85da51611089cd05749f098d6c225d4cd42ee6ec9181900360200190a361055b565b610000565b5b5b50505050565b600282600b81116100005714156105ac576040805182815290517f56d2ef3c5228bf5d88573621e325a4672ab50e033749a601e4f4a5e1dce905d49181900360200190a1610611565b600782600b81116100005714156105ed5760405181907ff70fe689e290d8ce2b2a388ac28db36fbb0e16a6d89c6804c461f65a1b40bb1590600090a2610611565b600682600b811161000057141561006c576040518190600090a1610611565b610000565b5b5b5b5050565b81604051808280519060200190808383829060006004602084601f0104600302600f01f15090500191505060405180910390207fe77cf33df73da7bc2e253a2dae617e6f15e4e337eaa462a108903af4643d1b758260405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156106cd5780820380516001836020036101000a031916815260200191505b509250505060405180910390a25b5050565b7f532fd6ea96cfb78bb46e09279a26828b8b493de1a2b8b1ee1face527978a15a58160405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156102a75780820380516001836020036101000a031916815260200191505b509250505060405180910390a15b5056" # noqa: E501 CONTRACT_EMITTER_ABI = 
json.loads('[{"constant":false,"inputs":[{"name":"v","type":"string"}],"name":"logString","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"which","type":"uint8"}],"name":"logNoArgs","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"which","type":"uint8"},{"name":"arg0","type":"uint256"},{"name":"arg1","type":"uint256"},{"name":"arg2","type":"uint256"},{"name":"arg3","type":"uint256"}],"name":"logQuadruple","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"which","type":"uint8"},{"name":"arg0","type":"uint256"},{"name":"arg1","type":"uint256"}],"name":"logDouble","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"which","type":"uint8"},{"name":"arg0","type":"uint256"},{"name":"arg1","type":"uint256"},{"name":"arg2","type":"uint256"}],"name":"logTriple","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"which","type":"uint8"},{"name":"arg0","type":"uint256"}],"name":"logSingle","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"arg0","type":"string"},{"name":"arg1","type":"string"}],"name":"logDynamicArgs","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"v","type":"bytes"}],"name":"logBytes","outputs":[],"payable":false,"type":"function"},{"anonymous":true,"inputs":[],"name":"LogAnonymous","type":"event"},{"anonymous":false,"inputs":[],"name":"LogNoArguments","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"arg0","type":"uint256"}],"name":"LogSingleArg","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"arg0","type":"uint256"},{"indexed":false,"name":"arg1","type":"uint256"}],"name":"LogDoubleArg","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"arg0","type":"uint256"},{"indexed":false,"name":"arg1","type":"uint256"},{"indexed":false,"name":"arg2","type":"uint256"}],"n
ame":"LogTripleArg","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"arg0","type":"uint256"},{"indexed":false,"name":"arg1","type":"uint256"},{"indexed":false,"name":"arg2","type":"uint256"},{"indexed":false,"name":"arg3","type":"uint256"}],"name":"LogQuadrupleArg","type":"event"},{"anonymous":true,"inputs":[{"indexed":true,"name":"arg0","type":"uint256"}],"name":"LogSingleAnonymous","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"arg0","type":"uint256"}],"name":"LogSingleWithIndex","type":"event"},{"anonymous":true,"inputs":[{"indexed":false,"name":"arg0","type":"uint256"},{"indexed":true,"name":"arg1","type":"uint256"}],"name":"LogDoubleAnonymous","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"arg0","type":"uint256"},{"indexed":true,"name":"arg1","type":"uint256"}],"name":"LogDoubleWithIndex","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"arg0","type":"uint256"},{"indexed":true,"name":"arg1","type":"uint256"},{"indexed":true,"name":"arg2","type":"uint256"}],"name":"LogTripleWithIndex","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"arg0","type":"uint256"},{"indexed":false,"name":"arg1","type":"uint256"},{"indexed":true,"name":"arg2","type":"uint256"},{"indexed":true,"name":"arg3","type":"uint256"}],"name":"LogQuadrupleWithIndex","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"arg0","type":"string"},{"indexed":false,"name":"arg1","type":"string"}],"name":"LogDynamicArgs","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"v","type":"bytes"}],"name":"LogBytes","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"v","type":"string"}],"name":"LogString","type":"event"}]') # noqa: E501 @pytest.fixture() def EMITTER_CODE(): return CONTRACT_EMITTER_CODE @pytest.fixture() def EMITTER_RUNTIME(): return CONTRACT_EMITTER_RUNTIME @pytest.fixture() def EMITTER_ABI(): return CONTRACT_EMITTER_ABI @pytest.fixture() 
def EMITTER(EMITTER_CODE, EMITTER_RUNTIME, EMITTER_ABI): return { 'bytecode': EMITTER_CODE, 'bytecode_runtime': EMITTER_RUNTIME, 'abi': EMITTER_ABI, } @pytest.fixture() def Emitter(web3_empty, EMITTER): web3 = web3_empty return web3.eth.contract(**EMITTER) @pytest.fixture() def emitter(web3_empty, Emitter, wait_for_transaction, wait_for_block): web3 = web3_empty wait_for_block(web3) deploy_txn_hash = Emitter.deploy({'from': web3.eth.coinbase, 'gas': 1000000}) deploy_receipt = wait_for_transaction(web3, deploy_txn_hash) contract_address = deploy_receipt['contractAddress'] bytecode = web3.eth.getCode(contract_address) assert bytecode == Emitter.bytecode_runtime return Emitter(address=contract_address) class LogFunctions(object): LogAnonymous = 0 LogNoArguments = 1 LogSingleArg = 2 LogDoubleArg = 3 LogTripleArg = 4 LogQuadrupleArg = 5 LogSingleAnonymous = 6 LogSingleWithIndex = 7 LogDoubleAnonymous = 8 LogDoubleWithIndex = 9 LogTripleWithIndex = 10 LogQuadrupleWithIndex = 11 @pytest.fixture() def emitter_event_ids(): return LogFunctions def _encode_to_topic(event_signature): return event_signature_to_log_topic(event_signature) class LogTopics(object): LogAnonymous = _encode_to_topic("LogAnonymous()") LogNoArguments = _encode_to_topic("LogNoArguments()") LogSingleArg = _encode_to_topic("LogSingleArg(uint256)") LogSingleAnonymous = _encode_to_topic("LogSingleAnonymous(uint256)") LogSingleWithIndex = _encode_to_topic("LogSingleWithIndex(uint256)") LogDoubleArg = _encode_to_topic("LogDoubleArg(uint256,uint256)") LogDoubleAnonymous = _encode_to_topic("LogDoubleAnonymous(uint256,uint256)") LogDoubleWithIndex = _encode_to_topic("LogDoubleWithIndex(uint256,uint256)") LogTripleArg = _encode_to_topic("LogTripleArg(uint256,uint256,uint256)") LogTripleWithIndex = _encode_to_topic("LogTripleWithIndex(uint256,uint256,uint256)") LogQuadrupleArg = _encode_to_topic("LogQuadrupleArg(uint256,uint256,uint256,uint256)") LogQuadrupleWithIndex = _encode_to_topic( 
"LogQuadrupleWithIndex(uint256,uint256,uint256,uint256)", ) LogBytes = _encode_to_topic("LogBytes(bytes)") LogString = _encode_to_topic("LogString(string)") LogDynamicArgs = _encode_to_topic("LogDynamicArgs(string,string)") @pytest.fixture() def emitter_log_topics(): return LogTopics @pytest.fixture() def some_address(): return '0x5B2063246F2191f18F2675ceDB8b28102e957458'
mit
willdecker/suds
suds/client.py
1
27894
# This program is free software; you can redistribute it and/or modify # it under the terms of the (LGPL) GNU Lesser General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library Lesser General Public License for more details at # ( http://www.gnu.org/licenses/lgpl.html ). # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # written by: Jeff Ortel ( jortel@redhat.com ) """ The I{2nd generation} service proxy provides access to web services. See I{README.txt} """ import suds import suds.metrics as metrics from cookielib import CookieJar from suds import * from suds.reader import DefinitionsReader from suds.transport import TransportError, Request from suds.transport.https import HttpAuthenticated from suds.servicedefinition import ServiceDefinition from suds import sudsobject from sudsobject import Factory as InstFactory from sudsobject import Object from suds.resolver import PathResolver from suds.builder import Builder from suds.wsdl import Definitions from suds.cache import MemCache # for testing #from suds.cache import ObjectCache from suds.sax.document import Document from suds.sax.parser import Parser from suds.options import Options from suds.properties import Unskin from urlparse import urlparse from copy import deepcopy from suds.plugin import PluginContainer from logging import getLogger log = getLogger(__name__) class Client(object): """ A lightweight web services client. I{(2nd generation)} API. @ivar wsdl: The WSDL object. 
@type wsdl:L{Definitions} @ivar service: The service proxy used to invoke operations. @type service: L{Service} @ivar factory: The factory used to create objects. @type factory: L{Factory} @ivar sd: The service definition @type sd: L{ServiceDefinition} @ivar messages: The last sent/received messages. @type messages: str[2] """ @classmethod def items(cls, sobject): """ Extract the I{items} from a suds object much like the items() method works on I{dict}. @param sobject: A suds object @type sobject: L{Object} @return: A list of items contained in I{sobject}. @rtype: [(key, value),...] """ return sudsobject.items(sobject) @classmethod def dict(cls, sobject): """ Convert a sudsobject into a dictionary. @param sobject: A suds object @type sobject: L{Object} @return: A python dictionary containing the items contained in I{sobject}. @rtype: dict """ return sudsobject.asdict(sobject) @classmethod def metadata(cls, sobject): """ Extract the metadata from a suds object. @param sobject: A suds object @type sobject: L{Object} @return: The object's metadata @rtype: L{sudsobject.Metadata} """ return sobject.__metadata__ def __init__(self, url, **kwargs): """ @param url: The URL for the WSDL. @type url: str @param kwargs: keyword arguments. @see: L{Options} """ options = Options() options.transport = HttpAuthenticated() self.options = options options.cache = MemCache() # changed from None to use memcache on GAE # for testing # options.cache = ObjectCache() self.set_options(**kwargs) reader = DefinitionsReader(options, Definitions) self.wsdl = reader.open(url) plugins = PluginContainer(options.plugins) plugins.init.initialized(wsdl=self.wsdl) self.factory = Factory(self.wsdl) self.service = ServiceSelector(self, self.wsdl.services) self.sd = [] for s in self.wsdl.services: sd = ServiceDefinition(self.wsdl, s) self.sd.append(sd) self.messages = dict(tx=None, rx=None) def set_options(self, **kwargs): """ Set options. @param kwargs: keyword arguments. 
@see: L{Options} """ p = Unskin(self.options) p.update(kwargs) def add_prefix(self, prefix, uri): """ Add I{static} mapping of an XML namespace prefix to a namespace. This is useful for cases when a wsdl and referenced schemas make heavy use of namespaces and those namespaces are subject to changed. @param prefix: An XML namespace prefix. @type prefix: str @param uri: An XML namespace URI. @type uri: str @raise Exception: when prefix is already mapped. """ root = self.wsdl.root mapped = root.resolvePrefix(prefix, None) if mapped is None: root.addPrefix(prefix, uri) return if mapped[1] != uri: raise Exception('"%s" already mapped as "%s"' % (prefix, mapped)) def last_sent(self): """ Get last sent I{soap} message. @return: The last sent I{soap} message. @rtype: L{Document} """ return self.messages.get('tx') def last_received(self): """ Get last received I{soap} message. @return: The last received I{soap} message. @rtype: L{Document} """ return self.messages.get('rx') def clone(self): """ Get a shallow clone of this object. The clone only shares the WSDL. All other attributes are unique to the cloned object including options. @return: A shallow clone. 
@rtype: L{Client} """ class Uninitialized(Client): def __init__(self): pass clone = Uninitialized() clone.options = Options() cp = Unskin(clone.options) mp = Unskin(self.options) cp.update(deepcopy(mp)) clone.wsdl = self.wsdl clone.factory = self.factory clone.service = ServiceSelector(clone, self.wsdl.services) clone.sd = self.sd clone.messages = dict(tx=None, rx=None) return clone def __str__(self): return unicode(self) def __unicode__(self): s = ['\n'] build = suds.__build__.split() s.append('Suds ( https://fedorahosted.org/suds/ )') s.append(' version: %s' % suds.__version__) if len(build) > 1: s.append(' %s build: %s' % (build[0], build[1])) else: s.append(' build: %s' % (build[0])) for sd in self.sd: s.append('\n\n%s' % unicode(sd)) return ''.join(s) class Factory: """ A factory for instantiating types defined in the wsdl @ivar resolver: A schema type resolver. @type resolver: L{PathResolver} @ivar builder: A schema object builder. @type builder: L{Builder} """ def __init__(self, wsdl): """ @param wsdl: A schema object. @type wsdl: L{wsdl.Definitions} """ self.wsdl = wsdl self.resolver = PathResolver(wsdl) self.builder = Builder(self.resolver) def create(self, name): """ create a WSDL type by name @param name: The name of a type defined in the WSDL. @type name: str @return: The requested object. @rtype: L{Object} """ timer = metrics.Timer() timer.start() type = self.resolver.find(name) if type is None: raise TypeNotFound(name) if type.enum(): result = InstFactory.object(name) for e, a in type.children(): setattr(result, e.name, e.name) else: try: result = self.builder.build(type) except Exception, e: log.error("create '%s' failed", name, exc_info=True) raise BuildError(name, e) timer.stop() metrics.log.debug('%s created: %s', name, timer) return result def separator(self, ps): """ Set the path separator. @param ps: The new path separator. 
@type ps: char """ self.resolver = PathResolver(self.wsdl, ps) class ServiceSelector: """ The B{service} selector is used to select a web service. In most cases, the wsdl only defines (1) service in which access by subscript is passed through to a L{PortSelector}. This is also the behavior when a I{default} service has been specified. In cases where multiple services have been defined and no default has been specified, the service is found by name (or index) and a L{PortSelector} for the service is returned. In all cases, attribute access is forwarded to the L{PortSelector} for either the I{first} service or the I{default} service (when specified). @ivar __client: A suds client. @type __client: L{Client} @ivar __services: A list of I{wsdl} services. @type __services: list """ def __init__(self, client, services): """ @param client: A suds client. @type client: L{Client} @param services: A list of I{wsdl} services. @type services: list """ self.__client = client self.__services = services def __getattr__(self, name): """ Request to access an attribute is forwarded to the L{PortSelector} for either the I{first} service or the I{default} service (when specified). @param name: The name of a method. @type name: str @return: A L{PortSelector}. @rtype: L{PortSelector}. """ default = self.__ds() if default is None: port = self.__find(0) else: port = default return getattr(port, name) def __getitem__(self, name): """ Provides selection of the I{service} by name (string) or index (integer). In cases where only (1) service is defined or a I{default} has been specified, the request is forwarded to the L{PortSelector}. @param name: The name (or index) of a service. @type name: (int|str) @return: A L{PortSelector} for the specified service. @rtype: L{PortSelector}. 
""" if len(self.__services) == 1: port = self.__find(0) return port[name] default = self.__ds() if default is not None: port = default return port[name] return self.__find(name) def __find(self, name): """ Find a I{service} by name (string) or index (integer). @param name: The name (or index) of a service. @type name: (int|str) @return: A L{PortSelector} for the found service. @rtype: L{PortSelector}. """ service = None if not len(self.__services): raise Exception, 'No services defined' if isinstance(name, int): try: service = self.__services[name] name = service.name except IndexError: raise ServiceNotFound, 'at [%d]' % name else: for s in self.__services: if name == s.name: service = s break if service is None: raise ServiceNotFound, name return PortSelector(self.__client, service.ports, name) def __ds(self): """ Get the I{default} service if defined in the I{options}. @return: A L{PortSelector} for the I{default} service. @rtype: L{PortSelector}. """ ds = self.__client.options.service if ds is None: return None else: return self.__find(ds) class PortSelector: """ The B{port} selector is used to select a I{web service} B{port}. In cases where multiple ports have been defined and no default has been specified, the port is found by name (or index) and a L{MethodSelector} for the port is returned. In all cases, attribute access is forwarded to the L{MethodSelector} for either the I{first} port or the I{default} port (when specified). @ivar __client: A suds client. @type __client: L{Client} @ivar __ports: A list of I{service} ports. @type __ports: list @ivar __qn: The I{qualified} name of the port (used for logging). @type __qn: str """ def __init__(self, client, ports, qn): """ @param client: A suds client. @type client: L{Client} @param ports: A list of I{service} ports. @type ports: list @param qn: The name of the service. 
@type qn: str """ self.__client = client self.__ports = ports self.__qn = qn def __getattr__(self, name): """ Request to access an attribute is forwarded to the L{MethodSelector} for either the I{first} port or the I{default} port (when specified). @param name: The name of a method. @type name: str @return: A L{MethodSelector}. @rtype: L{MethodSelector}. """ default = self.__dp() if default is None: m = self.__find(0) else: m = default return getattr(m, name) def __getitem__(self, name): """ Provides selection of the I{port} by name (string) or index (integer). In cases where only (1) port is defined or a I{default} has been specified, the request is forwarded to the L{MethodSelector}. @param name: The name (or index) of a port. @type name: (int|str) @return: A L{MethodSelector} for the specified port. @rtype: L{MethodSelector}. """ default = self.__dp() if default is None: return self.__find(name) else: return default def __find(self, name): """ Find a I{port} by name (string) or index (integer). @param name: The name (or index) of a port. @type name: (int|str) @return: A L{MethodSelector} for the found port. @rtype: L{MethodSelector}. """ port = None if not len(self.__ports): raise Exception, 'No ports defined: %s' % self.__qn if isinstance(name, int): qn = '%s[%d]' % (self.__qn, name) try: port = self.__ports[name] except IndexError: raise PortNotFound, qn else: qn = '.'.join((self.__qn, name)) for p in self.__ports: if name == p.name: port = p break if port is None: raise PortNotFound, qn qn = '.'.join((self.__qn, port.name)) return MethodSelector(self.__client, port.methods, qn) def __dp(self): """ Get the I{default} port if defined in the I{options}. @return: A L{MethodSelector} for the I{default} port. @rtype: L{MethodSelector}. """ dp = self.__client.options.port if dp is None: return None else: return self.__find(dp) class MethodSelector: """ The B{method} selector is used to select a B{method} by name. @ivar __client: A suds client. 
@type __client: L{Client} @ivar __methods: A dictionary of methods. @type __methods: dict @ivar __qn: The I{qualified} name of the method (used for logging). @type __qn: str """ def __init__(self, client, methods, qn): """ @param client: A suds client. @type client: L{Client} @param methods: A dictionary of methods. @type methods: dict @param qn: The I{qualified} name of the port. @type qn: str """ self.__client = client self.__methods = methods self.__qn = qn def __getattr__(self, name): """ Get a method by name and return it in an I{execution wrapper}. @param name: The name of a method. @type name: str @return: An I{execution wrapper} for the specified method name. @rtype: L{Method} """ return self[name] def __getitem__(self, name): """ Get a method by name and return it in an I{execution wrapper}. @param name: The name of a method. @type name: str @return: An I{execution wrapper} for the specified method name. @rtype: L{Method} """ m = self.__methods.get(name) if m is None: qn = '.'.join((self.__qn, name)) raise MethodNotFound, qn return Method(self.__client, m) class Method: """ The I{method} (namespace) object. @ivar client: A client object. @type client: L{Client} @ivar method: A I{wsdl} method. @type I{wsdl} Method. """ def __init__(self, client, method): """ @param client: A client object. @type client: L{Client} @param method: A I{raw} method. @type I{raw} Method. """ self.client = client self.method = method def __call__(self, *args, **kwargs): """ Invoke the method. 
""" clientclass = self.clientclass(kwargs) client = clientclass(self.client, self.method) if not self.faults(): try: return client.invoke(args, kwargs) except WebFault, e: return (500, e) else: return client.invoke(args, kwargs) def faults(self): """ get faults option """ return self.client.options.faults def clientclass(self, kwargs): """ get soap client class """ if SimClient.simulation(kwargs): return SimClient else: return SoapClient class SoapClient: """ A lightweight soap based web client B{**not intended for external use} @ivar service: The target method. @type service: L{Service} @ivar method: A target method. @type method: L{Method} @ivar options: A dictonary of options. @type options: dict @ivar cookiejar: A cookie jar. @type cookiejar: libcookie.CookieJar """ def __init__(self, client, method): """ @param client: A suds client. @type client: L{Client} @param method: A target method. @type method: L{Method} """ self.client = client self.method = method self.options = client.options self.cookiejar = CookieJar() def invoke(self, args, kwargs): """ Send the required soap message to invoke the specified method @param args: A list of args for the method invoked. @type args: list @param kwargs: Named (keyword) args for the method invoked. @type kwargs: dict @return: The result of the method invocation. @rtype: I{builtin}|I{subclass of} L{Object} """ timer = metrics.Timer() timer.start() result = None binding = self.method.binding.input soapenv = binding.get_message(self.method, args, kwargs) timer.stop() metrics.log.debug( "message for '%s' created: %s", self.method.name, timer) timer.start() result = self.send(soapenv) timer.stop() metrics.log.debug( "method '%s' invoked: %s", self.method.name, timer) return result def send(self, soapenv): """ Send soap message. @param soapenv: A soap envelope to send. @type soapenv: L{Document} @return: The reply to the sent message. 
@rtype: I{builtin} or I{subclass of} L{Object} """ result = None location = self.location() binding = self.method.binding.input transport = self.options.transport retxml = self.options.retxml nosend = self.options.nosend prettyxml = self.options.prettyxml timer = metrics.Timer() log.debug('sending to (%s)\nmessage:\n%s', location, soapenv) try: self.last_sent(soapenv) plugins = PluginContainer(self.options.plugins) plugins.message.marshalled(envelope=soapenv.root()) if prettyxml: soapenv = soapenv.str() else: soapenv = soapenv.plain() soapenv = soapenv.encode('utf-8') ctx = plugins.message.sending(envelope=soapenv) soapenv = ctx.envelope if nosend: return RequestContext(self, binding, soapenv) request = Request(location, soapenv) request.headers = self.headers() timer.start() reply = transport.send(request) timer.stop() metrics.log.debug('waited %s on server reply', timer) ctx = plugins.message.received(reply=reply.message) reply.message = ctx.reply if retxml: result = reply.message else: result = self.succeeded(binding, reply.message) except TransportError, e: if e.httpcode in (202,204): result = None else: log.error(self.last_sent()) result = self.failed(binding, e) return result def headers(self): """ Get http headers or the http/https request. @return: A dictionary of header/values. @rtype: dict """ action = self.method.soap.action if isinstance(action, unicode): action = action.encode('utf-8') stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action } result = dict(stock, **self.options.headers) log.debug('headers = %s', result) return result def succeeded(self, binding, reply): """ Request succeeded, process the reply @param binding: The binding to be used to process the reply. @type binding: L{bindings.binding.Binding} @param reply: The raw reply text. @type reply: str @return: The method result. @rtype: I{builtin}, L{Object} @raise WebFault: On server. 
""" log.debug('http succeeded:\n%s', reply) plugins = PluginContainer(self.options.plugins) if len(reply) > 0: reply, result = binding.get_reply(self.method, reply) self.last_received(reply) else: result = None ctx = plugins.message.unmarshalled(reply=result) result = ctx.reply if self.options.faults: return result else: return (200, result) def failed(self, binding, error): """ Request failed, process reply based on reason @param binding: The binding to be used to process the reply. @type binding: L{suds.bindings.binding.Binding} @param error: The http error message @type error: L{transport.TransportError} """ status, reason = (error.httpcode, tostr(error)) reply = error.fp.read() log.debug('http failed:\n%s', reply) if status == 500: if len(reply) > 0: r, p = binding.get_fault(reply) self.last_received(r) return (status, p) else: return (status, None) if self.options.faults: raise Exception((status, reason)) else: return (status, None) def location(self): p = Unskin(self.options) return p.get('location', self.method.location) def last_sent(self, d=None): key = 'tx' messages = self.client.messages if d is None: return messages.get(key) else: messages[key] = d def last_received(self, d=None): key = 'rx' messages = self.client.messages if d is None: return messages.get(key) else: messages[key] = d class SimClient(SoapClient): """ Loopback client used for message/reply simulation. """ injkey = '__inject' @classmethod def simulation(cls, kwargs): """ get whether loopback has been specified in the I{kwargs}. """ return kwargs.has_key(SimClient.injkey) def invoke(self, args, kwargs): """ Send the required soap message to invoke the specified method @param args: A list of args for the method invoked. @type args: list @param kwargs: Named (keyword) args for the method invoked. @type kwargs: dict @return: The result of the method invocation. 
@rtype: I{builtin} or I{subclass of} L{Object} """ simulation = kwargs[self.injkey] msg = simulation.get('msg') reply = simulation.get('reply') fault = simulation.get('fault') if msg is None: if reply is not None: return self.__reply(reply, args, kwargs) if fault is not None: return self.__fault(fault) raise Exception('(reply|fault) expected when msg=None') sax = Parser() msg = sax.parse(string=msg) return self.send(msg) def __reply(self, reply, args, kwargs): """ simulate the reply """ binding = self.method.binding.input msg = binding.get_message(self.method, args, kwargs) log.debug('inject (simulated) send message:\n%s', msg) binding = self.method.binding.output return self.succeeded(binding, reply) def __fault(self, reply): """ simulate the (fault) reply """ binding = self.method.binding.output if self.options.faults: r, p = binding.get_fault(reply) self.last_received(r) return (500, p) else: return (500, None) class RequestContext: """ A request context. Returned when the ''nosend'' options is specified. @ivar client: The suds client. @type client: L{Client} @ivar binding: The binding for this request. @type binding: I{Binding} @ivar envelope: The request soap envelope. @type envelope: str """ def __init__(self, client, binding, envelope): """ @param client: The suds client. @type client: L{Client} @param binding: The binding for this request. @type binding: I{Binding} @param envelope: The request soap envelope. @type envelope: str """ self.client = client self.binding = binding self.envelope = envelope def succeeded(self, reply): """ Re-entry for processing a successful reply. @param reply: The reply soap envelope. @type reply: str @return: The returned value for the invoked method. @rtype: object """ options = self.client.options plugins = PluginContainer(options.plugins) ctx = plugins.message.received(reply=reply) reply = ctx.reply return self.client.succeeded(self.binding, reply) def failed(self, error): """ Re-entry for processing a failure reply. 
@param error: The error returned by the transport. @type error: A suds I{TransportError}. """ return self.client.failed(self.binding, error)
mit
OpenCode/l10n-italy
l10n_it_pec/model/partner.py
11
1108
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 Associazione Odoo Italia # (<http://www.openerp-italia.org>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models, fields class ResPartner(models.Model): _inherit = "res.partner" pec_mail = fields.Char(string='PEC Mail')
agpl-3.0
ksooklall/deep_learning_foundation
embeddings/utils.py
150
2194
import re from collections import Counter def preprocess(text): # Replace punctuation with tokens so we can use them in our model text = text.lower() text = text.replace('.', ' <PERIOD> ') text = text.replace(',', ' <COMMA> ') text = text.replace('"', ' <QUOTATION_MARK> ') text = text.replace(';', ' <SEMICOLON> ') text = text.replace('!', ' <EXCLAMATION_MARK> ') text = text.replace('?', ' <QUESTION_MARK> ') text = text.replace('(', ' <LEFT_PAREN> ') text = text.replace(')', ' <RIGHT_PAREN> ') text = text.replace('--', ' <HYPHENS> ') text = text.replace('?', ' <QUESTION_MARK> ') # text = text.replace('\n', ' <NEW_LINE> ') text = text.replace(':', ' <COLON> ') words = text.split() # Remove all words with 5 or fewer occurences word_counts = Counter(words) trimmed_words = [word for word in words if word_counts[word] > 5] return trimmed_words def get_batches(int_text, batch_size, seq_length): """ Return batches of input and target :param int_text: Text with the words replaced by their ids :param batch_size: The size of batch :param seq_length: The length of sequence :return: A list where each item is a tuple of (batch of input, batch of target). """ n_batches = int(len(int_text) / (batch_size * seq_length)) # Drop the last few characters to make only full batches xdata = np.array(int_text[: n_batches * batch_size * seq_length]) ydata = np.array(int_text[1: n_batches * batch_size * seq_length + 1]) x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, 1) y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, 1) return list(zip(x_batches, y_batches)) def create_lookup_tables(words): """ Create lookup tables for vocabulary :param words: Input list of words :return: A tuple of dicts. The first dict.... 
""" word_counts = Counter(words) sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True) int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)} vocab_to_int = {word: ii for ii, word in int_to_vocab.items()} return vocab_to_int, int_to_vocab
mit
robk5uj/invenio
modules/bibclassify/lib/bibclassify_daemon.py
8
15571
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ BibClassify daemon. FIXME: the code below requires collection table to be updated to add column: clsMETHOD_fk mediumint(9) unsigned NOT NULL, This is not clean and should be fixed. This module IS NOT standalone safe - it should never be run so. """ import sys import time import os import bibclassify_config as bconfig import bibclassify_text_extractor import bibclassify_engine import bibclassify_webinterface import bibtask from dbquery import run_sql from intbitset import intbitset from search_engine import get_collection_reclist from bibdocfile import BibRecDocs # Global variables allowing to retain the progress of the task. 
_INDEX = 0 _RECIDS_NUMBER = 0 ## INTERFACE def bibclassify_daemon(): """Constructs the BibClassify bibtask.""" bibtask.task_init(authorization_action='runbibclassify', authorization_msg="BibClassify Task Submission", description="Extract keywords and create a BibUpload " "task.\nExamples:\n" " $ bibclassify\n" " $ bibclassify -i 79 -k HEP\n" " $ bibclassify -c 'Articles' -k HEP\n", help_specific_usage=" -i, --recid\t\tkeywords are extracted from " "this record\n" " -c, --collection\t\tkeywords are extracted from this collection\n" " -k, --taxonomy\t\tkeywords are based on that reference", version="Invenio BibClassify v%s" % bconfig.VERSION, specific_params=("i:c:k:f", [ "recid=", "collection=", "taxonomy=", "force" ]), task_submit_elaborate_specific_parameter_fnc= _task_submit_elaborate_specific_parameter, task_submit_check_options_fnc=_task_submit_check_options, task_run_fnc=_task_run_core) ## PRIVATE METHODS def _ontology_exists(ontology_name): """Check if the ontology name is registered in the database.""" if run_sql("SELECT name FROM clsMETHOD WHERE name=%s", (ontology_name,)): return True return False def _collection_exists(collection_name): """Check if the collection name is registered in the database.""" if run_sql("SELECT name FROM collection WHERE name=%s", (collection_name,)): return True return False def _recid_exists(recid): """Check if the recid number is registered in the database.""" if run_sql("SELECT id FROM bibrec WHERE id=%s", (recid,)): return True return False def _get_recids_foreach_ontology(recids=None, collections=None, taxonomy=None): """Returns an array containing hash objects containing the collection, its corresponding ontology and the records belonging to the given collection.""" rec_onts = [] # User specified record IDs. if recids: rec_onts.append({ 'ontology': taxonomy, 'collection': None, 'recIDs': recids, }) return rec_onts # User specified collections. 
if collections: for collection in collections: records = get_collection_reclist(collection) if records: rec_onts.append({ 'ontology': taxonomy, 'collection': collection, 'recIDs': records }) return rec_onts # Use rules found in collection_clsMETHOD. result = run_sql("SELECT clsMETHOD.name, clsMETHOD.last_updated, " "collection.name FROM clsMETHOD JOIN collection_clsMETHOD ON " "clsMETHOD.id=id_clsMETHOD JOIN collection ON " "id_collection=collection.id") for ontology, date_last_run, collection in result: records = get_collection_reclist(collection) if records: if not date_last_run: bibtask.write_message("INFO: Collection %s has not been previously " "analyzed." % collection, stream=sys.stderr, verbose=3) modified_records = intbitset(run_sql("SELECT id FROM bibrec")) elif bibtask.task_get_option('force'): bibtask.write_message("INFO: Analysis is forced for collection %s." % collection, stream=sys.stderr, verbose=3) modified_records = intbitset(run_sql("SELECT id FROM bibrec")) else: modified_records = intbitset(run_sql("SELECT id FROM bibrec " "WHERE modification_date >= %s", (date_last_run, ))) records &= modified_records if records: rec_onts.append({ 'ontology': ontology, 'collection': collection, 'recIDs': records }) else: bibtask.write_message("WARNING: All records from collection '%s' have " "already been analyzed for keywords with ontology '%s' " "on %s." % (collection, ontology, date_last_run), stream=sys.stderr, verbose=2) else: bibtask.write_message("ERROR: Collection '%s' doesn't contain any record. " "Cannot analyse keywords." % (collection,), stream=sys.stderr, verbose=0) return rec_onts def _update_date_of_last_run(runtime): """Update bibclassify daemon table information about last run time.""" run_sql("UPDATE clsMETHOD SET last_updated=%s", (runtime,)) def _task_submit_elaborate_specific_parameter(key, value, opts, args): """Given the string key it checks it's meaning, eventually using the value. Usually it fills some key in the options dict. 
It must return True if it has elaborated the key, False, if it doesn't know that key. eg: if key in ('-n', '--number'): bibtask.task_get_option(\1) = value return True return False """ # Recid option if key in ("-i", "--recid"): try: value = int(value) except ValueError: bibtask.write_message("The value specified for --recid must be a " "valid integer, not '%s'." % value, stream=sys.stderr, verbose=0) if not _recid_exists(value): bibtask.write_message("ERROR: '%s' is not a valid record ID." % value, stream=sys.stderr, verbose=0) return False recids = bibtask.task_get_option('recids') if recids is None: recids = [] recids.append(value) bibtask.task_set_option('recids', recids) # Collection option elif key in ("-c", "--collection"): if not _collection_exists(value): bibtask.write_message("ERROR: '%s' is not a valid collection." % value, stream=sys.stderr, verbose=0) return False collections = bibtask.task_get_option("collections") collections = collections or [] collections.append(value) bibtask.task_set_option("collections", collections) # Taxonomy option elif key in ("-k", "--taxonomy"): if not _ontology_exists(value): bibtask.write_message("ERROR: '%s' is not a valid taxonomy name." % value, stream=sys.stderr, verbose=0) return False bibtask.task_set_option("taxonomy", value) elif key in ("-f", "--force"): bibtask.task_set_option("force", True) else: return False return True def _task_run_core(): """Runs analyse_documents for each ontology, collection, record ids set.""" automated_daemon_mode_p = True recids = bibtask.task_get_option('recids') collections = bibtask.task_get_option('collections') taxonomy = bibtask.task_get_option('taxonomy') if recids or collections: # We want to run some records/collection only, so we are not # in the automated daemon mode; this will be useful later. automated_daemon_mode_p = False # Check if the user specified which documents to extract keywords from. 
if recids: onto_recids = _get_recids_foreach_ontology(recids=recids, taxonomy=taxonomy) elif collections: onto_recids = _get_recids_foreach_ontology(collections=collections, taxonomy=taxonomy) else: onto_recids = _get_recids_foreach_ontology() if not onto_recids: # Nothing to do. if automated_daemon_mode_p: _update_date_of_last_run(bibtask.task_get_task_param('task_starting_time')) return 1 # We will write to a temporary file as we go, because we might be processing # big collections with many docs _rid = time.strftime("%Y%m%d%H%M%S", time.localtime()) abs_path = bibclassify_engine.get_tmp_file(_rid) fo = open(abs_path, 'w') fo.write('<?xml version="1.0" encoding="UTF-8"?>\n') fo.write('<collection xmlns="http://www.loc.gov/MARC21/slim">\n') # Count the total number of records in order to update the progression. global _RECIDS_NUMBER for onto_rec in onto_recids: _RECIDS_NUMBER += len(onto_rec['recIDs']) rec_added = False for onto_rec in onto_recids: bibtask.task_sleep_now_if_required(can_stop_too=False) if onto_rec['collection'] is not None: bibtask.write_message('INFO: Applying taxonomy %s to collection %s (%s ' 'records)' % (onto_rec['ontology'], onto_rec['collection'], len(onto_rec['recIDs'])), stream=sys.stderr, verbose=3) else: bibtask.write_message('INFO: Applying taxonomy %s to recIDs %s. ' % (onto_rec['ontology'], ', '.join([str(recid) for recid in onto_rec['recIDs']])), stream=sys.stderr, verbose=3) if onto_rec['recIDs']: xml = _analyze_documents(onto_rec['recIDs'], onto_rec['ontology'], onto_rec['collection']) if len(xml) > 5: fo.write(xml) rec_added = True fo.write('</collection>\n') fo.close() # Apply the changes. 
if rec_added: if bconfig.CFG_DB_SAVE_KW: bibclassify_webinterface.upload_keywords(abs_path) else: bibtask.write_message("INFO: CFG_DB_SAVE_KW is false, we don't save results", stream=sys.stderr, verbose=0) else: bibtask.write_message("WARNING: No keywords found, recids: %s" % onto_recids, stream=sys.stderr, verbose=0) os.remove(abs_path) # Update the date of last run in the clsMETHOD table, but only if # we were running in an automated mode. if automated_daemon_mode_p: _update_date_of_last_run(bibtask.task_get_task_param('task_starting_time')) return 1 def _analyze_documents(records, taxonomy_name, collection, output_limit=bconfig.CFG_BIBCLASSIFY_DEFAULT_OUTPUT_NUMBER): """For each collection, parse the documents attached to the records in collection with the corresponding taxonomy_name. @var records: list of recids to process @var taxonomy_name: str, name of the taxonomy, e.g. HEP @var collection: str, collection name @keyword output_limit: int, max number of keywords to extract [3] @return: str, marcxml output format of results """ global _INDEX if not records: # No records could be found. bibtask.write_message("WARNING: No records were found in collection %s." % collection, stream=sys.stderr, verbose=2) return False # Process records: output = [] for record in records: bibdocfiles = BibRecDocs(record).list_latest_files() # TODO: why this doesn't call list_all_files() ? keywords = {} akws = {} acro = {} single_keywords = composite_keywords = author_keywords = acronyms = None for doc in bibdocfiles: # Get the keywords for all PDF documents contained in the record. if bibclassify_text_extractor.is_pdf(doc.get_full_path()): bibtask.write_message('INFO: Generating keywords for record %d.' 
% record, stream=sys.stderr, verbose=3) fulltext = doc.get_path() single_keywords, composite_keywords, author_keywords, acronyms = \ bibclassify_engine.get_keywords_from_local_file(fulltext, taxonomy_name, with_author_keywords=True, output_mode="raw", output_limit=output_limit, match_mode='partial') else: bibtask.write_message('WARNING: BibClassify does not know how to process \ doc: %s (type: %s) -- ignoring it.' % (doc.fullpath, doc.doctype), stream=sys.stderr, verbose=3) if single_keywords or composite_keywords: cleaned_single = bibclassify_engine.clean_before_output(single_keywords) cleaned_composite = bibclassify_engine.clean_before_output(composite_keywords) # merge the groups into one keywords.update(cleaned_single) keywords.update(cleaned_composite) acro.update(acronyms) akws.update(author_keywords) if len(keywords): output.append('<record>') output.append('<controlfield tag="001">%s</controlfield>' % record) output.append(bibclassify_engine._output_marc(keywords.items(), (), akws, acro, spires=bconfig.CFG_SPIRES_FORMAT)) output.append('</record>') else: bibtask.write_message('WARNING: No keywords found for record %d.' % record, stream=sys.stderr, verbose=0) _INDEX += 1 bibtask.task_update_progress('Done %d out of %d.' % (_INDEX, _RECIDS_NUMBER)) bibtask.task_sleep_now_if_required(can_stop_too=False) return '\n'.join(output) def _task_submit_check_options(): """Required by bibtask. Checks the options.""" recids = bibtask.task_get_option('recids') collections = bibtask.task_get_option('collections') taxonomy = bibtask.task_get_option('taxonomy') # If a recid or a collection is specified, check that the taxonomy # is also specified. if (recids is not None or collections is not None) and \ taxonomy is None: bibtask.write_message("ERROR: When specifying a record ID or a collection, " "you have to precise which\ntaxonomy to use.", stream=sys.stderr, verbose=0) return False return True # FIXME: outfiledesc can be multiple files, e.g. 
when processing # 100000 records it is good to store results by 1000 records # (see oaiharvest)
gpl-2.0
slub/vk2-georeference
georeference/views/user/georeferencehistory.py
1
3257
# -*- coding: utf-8 -*- ''' Copyright (c) 2015 Jacob Mendt Created on 07.10.15 @author: mendt ''' import traceback from pyramid.view import view_config from pyramid.httpexceptions import HTTPInternalServerError from sqlalchemy import desc from georeference import LOGGER from georeference.settings import OAI_ID_PATTERN from georeference.utils.exceptions import ParameterException from georeference.models.vkdb.georeferenzierungsprozess import Georeferenzierungsprozess from georeference.models.vkdb.map import Map from georeference.models.vkdb.metadata import Metadata GENERAL_ERROR_MESSAGE = 'Something went wrong while trying to process your requests. Please try again or contact the administrators of the Virtual Map Forum 2.0.' @view_config(route_name='user-history', renderer='json') def generateGeoreferenceHistory(request): def getUserId(request): """ Parse the process id from the request. :type request: pyramid.request :return: str|None """ if request.method == 'GET' and 'userid' in request.matchdict: return request.matchdict['userid'] return None LOGGER.info('Request - Get georeference profile page.') dbsession = request.db try: userid = getUserId(request) if userid is None: raise ParameterException("Wrong or missing userid.") LOGGER.debug('Query georeference profile information from database for user %s'%userid) queryData = request.db.query(Georeferenzierungsprozess, Metadata, Map).join(Metadata, Georeferenzierungsprozess.mapid == Metadata.mapid)\ .join(Map, Georeferenzierungsprozess.mapid == Map.id)\ .filter(Georeferenzierungsprozess.nutzerid == userid)\ .order_by(desc(Georeferenzierungsprozess.id)) LOGGER.debug('Create response list') georef_profile = [] points = 0 for record in queryData: georef = record[0] metadata = record[1] mapObj = record[2] # # create response # responseRecord = {'georefid':georef.id, 'mapid':OAI_ID_PATTERN%georef.mapid, 'georefparams': georef.georefparams, 'time': str(metadata.timepublish), 'transformed': georef.processed, 'isvalide': 
georef.adminvalidation, 'title': metadata.title, 'key': mapObj.apsdateiname, 'georeftime':str(georef.timestamp),'type':georef.type, 'published':georef.processed, 'thumbnail': metadata.thumbsmid} # add boundingbox if exists if mapObj.boundingbox is not None: responseRecord['boundingbox'] = mapObj.getExtentAsString(dbsession, 4326) # calculate points if georef.adminvalidation is not 'invalide': points += 20 georef_profile.append(responseRecord) LOGGER.debug('Response: %s'%georef_profile) return {'georef_profile':georef_profile, 'points':points} except Exception as e: LOGGER.error('Error while trying to request georeference history information'); LOGGER.error(e) LOGGER.error(traceback.format_exc()) raise HTTPInternalServerError(GENERAL_ERROR_MESSAGE)
gpl-3.0
kinverarity1/pyexperiment
tests/test_printers.py
3
3779
"""Tests the utils.printers module of pyexperiment Written by Peter Duerr """ from __future__ import print_function from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import import unittest import io import re from pyexperiment.utils.stdout_redirector import stdout_redirector from pyexperiment.utils import printers class PrinterTest(object): """ABC for the printer tests """ COLOR_SEQ = None """The color sequence of the tested color, set by the subclass """ def printer(self, messsage, *args): """Subclasses should implement this """ raise NotImplementedError def test_reset_seqence_appears(self): """Test printing actually produces the reset sequence """ buf = io.StringIO() with stdout_redirector(buf): self.printer("foo") # We will get the assertion later (by dependency injection) self.assertRegexpMatches( # pylint: disable=no-member buf.getvalue(), "%s" % re.escape(printers.RESET_SEQ)) def test_color_sequence_appears(self): """Test printing actually produces the color sequence """ buf = io.StringIO() with stdout_redirector(buf): self.printer("foo") # We will get the assertion later (by dependency injection) # pylint: disable=no-member self.assertIsNotNone(self.COLOR_SEQ) self.assertRegexpMatches( buf.getvalue(), "%s" % re.escape(self.COLOR_SEQ)) def test_message_sequence_appears(self): """Test printing actually prints the message """ buf = io.StringIO() with stdout_redirector(buf): self.printer("foo") # We will get the assertion later (by dependency injection) self.assertRegexpMatches( # pylint: disable=no-member buf.getvalue(), "foo") def test_message_interpolates_args(self): """Test printing actually interpolates the arguments correctly """ message = "str: %s, int: %d, float %f" arguments = ('bla', 12, 3.14) buf = io.StringIO() with stdout_redirector(buf): self.printer(message, *arguments) # We will get the assertion later (by dependency injection) self.assertRegexpMatches( # pylint: disable=no-member 
buf.getvalue(), r'.*%s.*' % (message % arguments)) def create_printer_test(color_): """Factory for printer tests """ class TestPrinters(unittest.TestCase, PrinterTest): """Test the printer for a color """ COLOR_SEQ = printers.COLORS[color_] """The color sequence of the tested color """ def printer(self, message, *args): """The printer to be tested """ getattr(printers, 'print_' + color_)(message, *args) return TestPrinters for color in printers.COLORS.keys(): vars()['Test' + color.title()] = create_printer_test(color) class TestPrinterExamples(unittest.TestCase): """Test the print_examples function of pyexperiment.utils.printers """ def test_examples_print_something(self): """Make sure calling print_examples prints something """ buf = io.StringIO() with stdout_redirector(buf): printers.print_examples() self.assertTrue(len(buf.getvalue()) > 0) def test_example_messsage_with_args(self): """Make sure print_examples interpolates message with arguments """ buf = io.StringIO() with stdout_redirector(buf): printers.print_examples("Foo %d", 3 + 4) self.assertRegexpMatches( buf.getvalue(), r'.*%s.*' % "Foo 7")
mit
user-none/calibre
src/calibre/db/legacy.py
2
41359
#!/usr/bin/env python2 # vim:fileencoding=utf-8 from __future__ import (unicode_literals, division, absolute_import, print_function) __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' import os, traceback, types from future_builtins import zip from calibre import force_unicode, isbytestring from calibre.constants import preferred_encoding from calibre.db import _get_next_series_num_for_list, _get_series_values, get_data_as_dict from calibre.db.adding import ( find_books_in_directory, import_book_directory_multiple, import_book_directory, recursive_import, add_catalog, add_news) from calibre.db.backend import DB from calibre.db.cache import Cache from calibre.db.errors import NoSuchFormat from calibre.db.categories import CATEGORY_SORTS from calibre.db.view import View from calibre.db.write import clean_identifier, get_series_values from calibre.utils.date import utcnow from calibre.utils.search_query_parser import set_saved_searches def cleanup_tags(tags): tags = [x.strip().replace(',', ';') for x in tags if x.strip()] tags = [x.decode(preferred_encoding, 'replace') if isbytestring(x) else x for x in tags] tags = [u' '.join(x.split()) for x in tags] ans, seen = [], set([]) for tag in tags: if tag.lower() not in seen: seen.add(tag.lower()) ans.append(tag) return ans def create_backend( library_path, default_prefs=None, read_only=False, progress_callback=lambda x, y:True, restore_all_prefs=False): return DB(library_path, default_prefs=default_prefs, read_only=read_only, restore_all_prefs=restore_all_prefs, progress_callback=progress_callback) class LibraryDatabase(object): ''' Emulate the old LibraryDatabase2 interface ''' PATH_LIMIT = DB.PATH_LIMIT WINDOWS_LIBRARY_PATH_LIMIT = DB.WINDOWS_LIBRARY_PATH_LIMIT CATEGORY_SORTS = CATEGORY_SORTS MATCH_TYPE = ('any', 'all') CUSTOM_DATA_TYPES = frozenset(['rating', 'text', 'comments', 'datetime', 'int', 'float', 'bool', 'series', 'composite', 'enumeration']) @classmethod def exists_at(cls, 
path): return path and os.path.exists(os.path.join(path, 'metadata.db')) def __init__(self, library_path, default_prefs=None, read_only=False, is_second_db=False, progress_callback=lambda x, y:True, restore_all_prefs=False): self.is_second_db = is_second_db self.listeners = set() backend = self.backend = create_backend(library_path, default_prefs=default_prefs, read_only=read_only, restore_all_prefs=restore_all_prefs, progress_callback=progress_callback) cache = self.new_api = Cache(backend) cache.init() self.data = View(cache) self.id = self.data.index_to_id self.row = self.data.id_to_index for x in ('get_property', 'count', 'refresh_ids', 'set_marked_ids', 'multisort', 'search', 'search_getting_ids'): setattr(self, x, getattr(self.data, x)) self.is_case_sensitive = getattr(backend, 'is_case_sensitive', False) self.custom_field_name = backend.custom_field_name self.last_update_check = self.last_modified() if not self.is_second_db: set_saved_searches(self, 'saved_searches') def close(self): self.new_api.close() def break_cycles(self): delattr(self.backend, 'field_metadata') self.data.cache.backend = None self.data.cache = None for x in ('data', 'backend', 'new_api', 'listeners',): delattr(self, x) # Library wide properties {{{ @property def prefs(self): return self.new_api.backend.prefs @property def field_metadata(self): return self.backend.field_metadata @property def user_version(self): return self.backend.user_version @property def library_id(self): return self.backend.library_id @property def library_path(self): return self.backend.library_path @property def dbpath(self): return self.backend.dbpath def last_modified(self): return self.backend.last_modified() def check_if_modified(self): if self.last_modified() > self.last_update_check: self.backend.reopen() self.new_api.reload_from_db() self.data.refresh(clear_caches=False) # caches are already cleared by reload_from_db() self.last_update_check = utcnow() @property def custom_column_num_map(self): return 
self.backend.custom_column_num_map @property def custom_column_label_map(self): return self.backend.custom_column_label_map @property def FIELD_MAP(self): return self.backend.FIELD_MAP @property def formatter_template_cache(self): return self.data.cache.formatter_template_cache def initialize_template_cache(self): self.data.cache.initialize_template_cache() def all_ids(self): 'All book ids in the db. This can no longer be a generator because of db locking.' return tuple(self.new_api.all_book_ids()) def is_empty(self): with self.new_api.safe_read_lock: return not bool(self.new_api.fields['title'].table.book_col_map) def get_usage_count_by_id(self, field): return [[k, v] for k, v in self.new_api.get_usage_count_by_id(field).iteritems()] def field_id_map(self, field): return [(k, v) for k, v in self.new_api.get_id_map(field).iteritems()] def get_custom_items_with_ids(self, label=None, num=None): try: return [[k, v] for k, v in self.new_api.get_id_map(self.custom_field_name(label, num)).iteritems()] except ValueError: return [] def refresh(self, field=None, ascending=True): self.data.refresh(field=field, ascending=ascending) def get_id_from_uuid(self, uuid): if uuid: return self.new_api.lookup_by_uuid(uuid) def add_listener(self, listener): ''' Add a listener. Will be called on change events with two arguments. Event name and list of affected ids. ''' self.listeners.add(listener) def notify(self, event, ids=[]): 'Notify all listeners' for listener in self.listeners: try: listener(event, ids) except: traceback.print_exc() continue # }}} def path(self, index, index_is_id=False): 'Return the relative path to the directory containing this books files as a unicode string.' book_id = index if index_is_id else self.id(index) return self.new_api.field_for('path', book_id).replace('/', os.sep) def abspath(self, index, index_is_id=False, create_dirs=True): 'Return the absolute path to the directory containing this books files as a unicode string.' 
path = os.path.join(self.library_path, self.path(index, index_is_id=index_is_id)) if create_dirs and not os.path.exists(path): os.makedirs(path) return path # Adding books {{{ def create_book_entry(self, mi, cover=None, add_duplicates=True, force_id=None): ret = self.new_api.create_book_entry(mi, cover=cover, add_duplicates=add_duplicates, force_id=force_id) if ret is not None: self.data.books_added((ret,)) return ret def add_books(self, paths, formats, metadata, add_duplicates=True, return_ids=False): books = [(mi, {fmt:path}) for mi, path, fmt in zip(metadata, paths, formats)] book_ids, duplicates = self.new_api.add_books(books, add_duplicates=add_duplicates, dbapi=self) if duplicates: paths, formats, metadata = [], [], [] for mi, format_map in duplicates: metadata.append(mi) for fmt, path in format_map.iteritems(): formats.append(fmt) paths.append(path) duplicates = (paths, formats, metadata) ids = book_ids if return_ids else len(book_ids) if book_ids: self.data.books_added(book_ids) return duplicates or None, ids def import_book(self, mi, formats, notify=True, import_hooks=True, apply_import_tags=True, preserve_uuid=False): format_map = {} for path in formats: ext = os.path.splitext(path)[1][1:].upper() if ext == 'OPF': continue format_map[ext] = path book_ids, duplicates = self.new_api.add_books( [(mi, format_map)], add_duplicates=True, apply_import_tags=apply_import_tags, preserve_uuid=preserve_uuid, dbapi=self, run_hooks=import_hooks) if book_ids: self.data.books_added(book_ids) if notify: self.notify('add', book_ids) return book_ids[0] def find_books_in_directory(self, dirpath, single_book_per_directory): return find_books_in_directory(dirpath, single_book_per_directory) def import_book_directory_multiple(self, dirpath, callback=None, added_ids=None): return import_book_directory_multiple(self, dirpath, callback=callback, added_ids=added_ids) def import_book_directory(self, dirpath, callback=None, added_ids=None): return import_book_directory(self, dirpath, 
callback=callback, added_ids=added_ids) def recursive_import(self, root, single_book_per_directory=True, callback=None, added_ids=None): return recursive_import(self, root, single_book_per_directory=single_book_per_directory, callback=callback, added_ids=added_ids) def add_catalog(self, path, title): book_id, new_book_added = add_catalog(self.new_api, path, title, dbapi=self) if book_id is not None and new_book_added: self.data.books_added((book_id,)) return book_id def add_news(self, path, arg): book_id = add_news(self.new_api, path, arg, dbapi=self) if book_id is not None: self.data.books_added((book_id,)) return book_id def add_format(self, index, fmt, stream, index_is_id=False, path=None, notify=True, replace=True, copy_function=None): ''' path and copy_function are ignored by the new API ''' book_id = index if index_is_id else self.id(index) try: return self.new_api.add_format(book_id, fmt, stream, replace=replace, run_hooks=False, dbapi=self) except: raise else: self.notify('metadata', [book_id]) def add_format_with_hooks(self, index, fmt, fpath, index_is_id=False, path=None, notify=True, replace=True): ''' path is ignored by the new API ''' book_id = index if index_is_id else self.id(index) try: return self.new_api.add_format(book_id, fmt, fpath, replace=replace, run_hooks=True, dbapi=self) except: raise else: self.notify('metadata', [book_id]) # }}} # Custom data {{{ def add_custom_book_data(self, book_id, name, val): self.new_api.add_custom_book_data(name, {book_id:val}) def add_multiple_custom_book_data(self, name, val_map, delete_first=False): self.new_api.add_custom_book_data(name, val_map, delete_first=delete_first) def get_custom_book_data(self, book_id, name, default=None): return self.new_api.get_custom_book_data(name, book_ids={book_id}, default=default).get(book_id, default) def get_all_custom_book_data(self, name, default=None): return self.new_api.get_custom_book_data(name, default=default) def delete_custom_book_data(self, book_id, name): 
self.new_api.delete_custom_book_data(name, book_ids=(book_id,)) def delete_all_custom_book_data(self, name): self.new_api.delete_custom_book_data(name) def get_ids_for_custom_book_data(self, name): return list(self.new_api.get_ids_for_custom_book_data(name)) # }}} def sort(self, field, ascending, subsort=False): self.multisort([(field, ascending)]) def get_field(self, index, key, default=None, index_is_id=False): book_id = index if index_is_id else self.id(index) mi = self.new_api.get_metadata(book_id, get_cover=key == 'cover') return mi.get(key, default) def cover_last_modified(self, index, index_is_id=False): book_id = index if index_is_id else self.id(index) return self.new_api.cover_last_modified(book_id) or self.last_modified() def cover(self, index, index_is_id=False, as_file=False, as_image=False, as_path=False): book_id = index if index_is_id else self.id(index) return self.new_api.cover(book_id, as_file=as_file, as_image=as_image, as_path=as_path) def copy_cover_to(self, index, dest, index_is_id=False, windows_atomic_move=None, use_hardlink=False): book_id = index if index_is_id else self.id(index) return self.new_api.copy_cover_to(book_id, dest, use_hardlink=use_hardlink) def copy_format_to(self, index, fmt, dest, index_is_id=False, windows_atomic_move=None, use_hardlink=False): book_id = index if index_is_id else self.id(index) return self.new_api.copy_format_to(book_id, fmt, dest, use_hardlink=use_hardlink) def delete_book(self, book_id, notify=True, commit=True, permanent=False, do_clean=True): self.new_api.remove_books((book_id,), permanent=permanent) self.data.books_deleted((book_id,)) if notify: self.notify('delete', [book_id]) def dirtied(self, book_ids, commit=True): self.new_api.mark_as_dirty(frozenset(book_ids) if book_ids is not None else book_ids) def dirty_queue_length(self): return self.new_api.dirty_queue_length() def dump_metadata(self, book_ids=None, remove_from_dirtied=True, commit=True, callback=None): 
self.new_api.dump_metadata(book_ids=book_ids, remove_from_dirtied=remove_from_dirtied, callback=callback) def authors_sort_strings(self, index, index_is_id=False): book_id = index if index_is_id else self.id(index) return list(self.new_api.author_sort_strings_for_books((book_id,))[book_id]) def author_sort_from_book(self, index, index_is_id=False): return ' & '.join(self.authors_sort_strings(index, index_is_id=index_is_id)) def authors_with_sort_strings(self, index, index_is_id=False): book_id = index if index_is_id else self.id(index) with self.new_api.safe_read_lock: authors = self.new_api._field_ids_for('authors', book_id) adata = self.new_api._author_data(authors) return [(aid, adata[aid]['name'], adata[aid]['sort'], adata[aid]['link']) for aid in authors] def set_sort_field_for_author(self, old_id, new_sort, commit=True, notify=False): changed_books = self.new_api.set_sort_for_authors({old_id:new_sort}) if notify: self.notify('metadata', list(changed_books)) def set_link_field_for_author(self, aid, link, commit=True, notify=False): changed_books = self.new_api.set_link_for_authors({aid:link}) if notify: self.notify('metadata', list(changed_books)) def book_on_device(self, book_id): with self.new_api.safe_read_lock: return self.new_api.fields['ondevice'].book_on_device(book_id) def book_on_device_string(self, book_id): return self.new_api.field_for('ondevice', book_id) def set_book_on_device_func(self, func): self.new_api.fields['ondevice'].set_book_on_device_func(func) @property def book_on_device_func(self): return self.new_api.fields['ondevice'].book_on_device_func def books_in_series(self, series_id): with self.new_api.safe_read_lock: book_ids = self.new_api._books_for_field('series', series_id) ff = self.new_api._field_for return sorted(book_ids, key=lambda x:ff('series_index', x)) def books_in_series_of(self, index, index_is_id=False): book_id = index if index_is_id else self.id(index) series_ids = self.new_api.field_ids_for('series', book_id) if not 
series_ids: return [] return self.books_in_series(series_ids[0]) def books_with_same_title(self, mi, all_matches=True): title = mi.title ans = set() if title: title = icu_lower(force_unicode(title)) for book_id, x in self.new_api.get_id_map('title').iteritems(): if icu_lower(x) == title: ans.add(book_id) if not all_matches: break return ans def set_conversion_options(self, book_id, fmt, options): self.new_api.set_conversion_options({book_id:options}, fmt=fmt) def conversion_options(self, book_id, fmt): return self.new_api.conversion_options(book_id, fmt=fmt) def has_conversion_options(self, ids, format='PIPE'): return self.new_api.has_conversion_options(ids, fmt=format) def delete_conversion_options(self, book_id, fmt, commit=True): self.new_api.delete_conversion_options((book_id,), fmt=fmt) def set(self, index, field, val, allow_case_change=False): book_id = self.id(index) try: return self.new_api.set_field(field, {book_id:val}, allow_case_change=allow_case_change) finally: self.notify('metadata', [book_id]) def set_identifier(self, book_id, typ, val, notify=True, commit=True): with self.new_api.write_lock: identifiers = self.new_api._field_for('identifiers', book_id) typ, val = clean_identifier(typ, val) if typ: identifiers[typ] = val self.new_api._set_field('identifiers', {book_id:identifiers}) self.notify('metadata', [book_id]) def set_isbn(self, book_id, isbn, notify=True, commit=True): self.set_identifier(book_id, 'isbn', isbn, notify=notify, commit=commit) def set_tags(self, book_id, tags, append=False, notify=True, commit=True, allow_case_change=False): tags = tags or [] with self.new_api.write_lock: if append: otags = self.new_api._field_for('tags', book_id) existing = {icu_lower(x) for x in otags} tags = list(otags) + [x for x in tags if icu_lower(x) not in existing] ret = self.new_api._set_field('tags', {book_id:tags}, allow_case_change=allow_case_change) if notify: self.notify('metadata', [book_id]) return ret def set_metadata(self, book_id, mi, 
ignore_errors=False, set_title=True, set_authors=True, commit=True, force_changes=False, notify=True): self.new_api.set_metadata(book_id, mi, ignore_errors=ignore_errors, set_title=set_title, set_authors=set_authors, force_changes=force_changes) if notify: self.notify('metadata', [book_id]) def remove_all_tags(self, ids, notify=False, commit=True): self.new_api.set_field('tags', {book_id:() for book_id in ids}) if notify: self.notify('metadata', ids) def _do_bulk_modify(self, field, ids, add, remove, notify): add = cleanup_tags(add) remove = cleanup_tags(remove) remove = set(remove) - set(add) if not ids or (not add and not remove): return remove = {icu_lower(x) for x in remove} with self.new_api.write_lock: val_map = {} for book_id in ids: tags = list(self.new_api._field_for(field, book_id)) existing = {icu_lower(x) for x in tags} tags.extend(t for t in add if icu_lower(t) not in existing) tags = tuple(t for t in tags if icu_lower(t) not in remove) val_map[book_id] = tags self.new_api._set_field(field, val_map, allow_case_change=False) if notify: self.notify('metadata', ids) def bulk_modify_tags(self, ids, add=[], remove=[], notify=False): self._do_bulk_modify('tags', ids, add, remove, notify) def set_custom_bulk_multiple(self, ids, add=[], remove=[], label=None, num=None, notify=False): data = self.backend.custom_field_metadata(label, num) if not data['editable']: raise ValueError('Column %r is not editable'%data['label']) if data['datatype'] != 'text' or not data['is_multiple']: raise ValueError('Column %r is not text/multiple'%data['label']) field = self.custom_field_name(label, num) self._do_bulk_modify(field, ids, add, remove, notify) def unapply_tags(self, book_id, tags, notify=True): self.bulk_modify_tags((book_id,), remove=tags, notify=notify) def is_tag_used(self, tag): return icu_lower(tag) in {icu_lower(x) for x in self.new_api.all_field_names('tags')} def delete_tag(self, tag): self.delete_tags((tag,)) def delete_tags(self, tags): with 
self.new_api.write_lock: tag_map = {icu_lower(v):k for k, v in self.new_api._get_id_map('tags').iteritems()} tag_ids = (tag_map.get(icu_lower(tag), None) for tag in tags) tag_ids = tuple(tid for tid in tag_ids if tid is not None) if tag_ids: self.new_api._remove_items('tags', tag_ids) def has_id(self, book_id): return self.new_api.has_id(book_id) def format(self, index, fmt, index_is_id=False, as_file=False, mode='r+b', as_path=False, preserve_filename=False): book_id = index if index_is_id else self.id(index) return self.new_api.format(book_id, fmt, as_file=as_file, as_path=as_path, preserve_filename=preserve_filename) def format_abspath(self, index, fmt, index_is_id=False): book_id = index if index_is_id else self.id(index) return self.new_api.format_abspath(book_id, fmt) def format_path(self, index, fmt, index_is_id=False): book_id = index if index_is_id else self.id(index) ans = self.new_api.format_abspath(book_id, fmt) if ans is None: raise NoSuchFormat('Record %d has no format: %s'%(book_id, fmt)) return ans def format_files(self, index, index_is_id=False): book_id = index if index_is_id else self.id(index) return [(v, k) for k, v in self.new_api.format_files(book_id).iteritems()] def format_metadata(self, book_id, fmt, allow_cache=True, update_db=False, commit=False): return self.new_api.format_metadata(book_id, fmt, allow_cache=allow_cache, update_db=update_db) def format_last_modified(self, book_id, fmt): m = self.format_metadata(book_id, fmt) if m: return m['mtime'] def formats(self, index, index_is_id=False, verify_formats=True): book_id = index if index_is_id else self.id(index) ans = self.new_api.formats(book_id, verify_formats=verify_formats) if ans: return ','.join(ans) def has_format(self, index, fmt, index_is_id=False): book_id = index if index_is_id else self.id(index) return self.new_api.has_format(book_id, fmt) def refresh_format_cache(self): self.new_api.refresh_format_cache() def refresh_ondevice(self): self.new_api.refresh_ondevice() def 
tags_older_than(self, tag, delta, must_have_tag=None, must_have_authors=None): for book_id in sorted(self.new_api.tags_older_than(tag, delta=delta, must_have_tag=must_have_tag, must_have_authors=must_have_authors)): yield book_id def sizeof_format(self, index, fmt, index_is_id=False): book_id = index if index_is_id else self.id(index) return self.new_api.format_metadata(book_id, fmt).get('size', None) def get_metadata(self, index, index_is_id=False, get_cover=False, get_user_categories=True, cover_as_data=False): book_id = index if index_is_id else self.id(index) return self.new_api.get_metadata(book_id, get_cover=get_cover, get_user_categories=get_user_categories, cover_as_data=cover_as_data) def rename_series(self, old_id, new_name, change_index=True): self.new_api.rename_items('series', {old_id:new_name}, change_index=change_index) def get_custom(self, index, label=None, num=None, index_is_id=False): book_id = index if index_is_id else self.id(index) ans = self.new_api.field_for(self.custom_field_name(label, num), book_id) if isinstance(ans, tuple): ans = list(ans) return ans def get_custom_extra(self, index, label=None, num=None, index_is_id=False): data = self.backend.custom_field_metadata(label, num) # add future datatypes with an extra column here if data['datatype'] != 'series': return None book_id = index if index_is_id else self.id(index) return self.new_api.field_for(self.custom_field_name(label, num) + '_index', book_id) def get_custom_and_extra(self, index, label=None, num=None, index_is_id=False): book_id = index if index_is_id else self.id(index) data = self.backend.custom_field_metadata(label, num) ans = self.new_api.field_for(self.custom_field_name(label, num), book_id) if isinstance(ans, tuple): ans = list(ans) if data['datatype'] != 'series': return (ans, None) return (ans, self.new_api.field_for(self.custom_field_name(label, num) + '_index', book_id)) def get_next_cc_series_num_for(self, series, label=None, num=None): data = 
self.backend.custom_field_metadata(label, num) if data['datatype'] != 'series': return None return self.new_api.get_next_series_num_for(series, field=self.custom_field_name(label, num)) def is_item_used_in_multiple(self, item, label=None, num=None): existing_tags = self.all_custom(label=label, num=num) return icu_lower(item) in {icu_lower(t) for t in existing_tags} def delete_custom_item_using_id(self, item_id, label=None, num=None): self.new_api.remove_items(self.custom_field_name(label, num), (item_id,)) def rename_custom_item(self, old_id, new_name, label=None, num=None): self.new_api.rename_items(self.custom_field_name(label, num), {old_id:new_name}, change_index=False) def delete_item_from_multiple(self, item, label=None, num=None): field = self.custom_field_name(label, num) existing = self.new_api.get_id_map(field) rmap = {icu_lower(v):k for k, v in existing.iteritems()} item_id = rmap.get(icu_lower(item), None) if item_id is None: return [] return list(self.new_api.remove_items(field, (item_id,))) def set_custom(self, book_id, val, label=None, num=None, append=False, notify=True, extra=None, commit=True, allow_case_change=False): field = self.custom_field_name(label, num) data = self.backend.custom_field_metadata(label, num) if data['datatype'] == 'composite': return set() if not data['editable']: raise ValueError('Column %r is not editable'%data['label']) if data['datatype'] == 'enumeration' and ( val and val not in data['display']['enum_values']): return set() with self.new_api.write_lock: if append and data['is_multiple']: current = self.new_api._field_for(field, book_id) existing = {icu_lower(x) for x in current} val = current + tuple(x for x in self.new_api.fields[field].writer.adapter(val) if icu_lower(x) not in existing) affected_books = self.new_api._set_field(field, {book_id:val}, allow_case_change=allow_case_change) else: affected_books = self.new_api._set_field(field, {book_id:val}, allow_case_change=allow_case_change) if data['datatype'] == 
'series': s, sidx = get_series_values(val) if sidx is None: extra = 1.0 if extra is None else extra self.new_api._set_field(field + '_index', {book_id:extra}) if notify and affected_books: self.notify('metadata', list(affected_books)) return affected_books def set_custom_bulk(self, ids, val, label=None, num=None, append=False, notify=True, extras=None): if extras is not None and len(extras) != len(ids): raise ValueError('Length of ids and extras is not the same') field = self.custom_field_name(label, num) data = self.backend.custom_field_metadata(label, num) if data['datatype'] == 'composite': return set() if data['datatype'] == 'enumeration' and ( val and val not in data['display']['enum_values']): return if not data['editable']: raise ValueError('Column %r is not editable'%data['label']) if append: for book_id in ids: self.set_custom(book_id, val, label=label, num=num, append=True, notify=False) else: with self.new_api.write_lock: self.new_api._set_field(field, {book_id:val for book_id in ids}, allow_case_change=False) if extras is not None: self.new_api._set_field(field + '_index', {book_id:val for book_id, val in zip(ids, extras)}) if notify: self.notify('metadata', list(ids)) def delete_custom_column(self, label=None, num=None): self.new_api.delete_custom_column(label, num) def create_custom_column(self, label, name, datatype, is_multiple, editable=True, display={}): self.new_api.create_custom_column(label, name, datatype, is_multiple, editable=editable, display=display) def set_custom_column_metadata(self, num, name=None, label=None, is_editable=None, display=None, notify=True, update_last_modified=False): changed = self.new_api.set_custom_column_metadata(num, name=name, label=label, is_editable=is_editable, display=display, update_last_modified=update_last_modified) if changed and notify: self.notify('metadata', []) def remove_cover(self, book_id, notify=True, commit=True): self.new_api.set_cover({book_id:None}) if notify: self.notify('cover', [book_id]) def 
set_cover(self, book_id, data, notify=True, commit=True): self.new_api.set_cover({book_id:data}) if notify: self.notify('cover', [book_id]) def original_fmt(self, book_id, fmt): nfmt = ('ORIGINAL_%s'%fmt).upper() return nfmt if self.new_api.has_format(book_id, nfmt) else fmt def save_original_format(self, book_id, fmt, notify=True): ret = self.new_api.save_original_format(book_id, fmt) if ret and notify: self.notify('metadata', [book_id]) return ret def restore_original_format(self, book_id, original_fmt, notify=True): ret = self.new_api.restore_original_format(book_id, original_fmt) if ret and notify: self.notify('metadata', [book_id]) return ret def remove_format(self, index, fmt, index_is_id=False, notify=True, commit=True, db_only=False): book_id = index if index_is_id else self.id(index) self.new_api.remove_formats({book_id:(fmt,)}, db_only=db_only) if notify: self.notify('metadata', [book_id]) # Private interface {{{ def __iter__(self): for row in self.data.iterall(): yield row def _get_next_series_num_for_list(self, series_indices): return _get_next_series_num_for_list(series_indices) def _get_series_values(self, val): return _get_series_values(val) # }}} MT = lambda func: types.MethodType(func, None, LibraryDatabase) # Legacy getter API {{{ for prop in ('author_sort', 'authors', 'comment', 'comments', 'publisher', 'max_size', 'rating', 'series', 'series_index', 'tags', 'title', 'title_sort', 'timestamp', 'uuid', 'pubdate', 'ondevice', 'metadata_last_modified', 'languages',): def getter(prop): fm = {'comment':'comments', 'metadata_last_modified': 'last_modified', 'title_sort':'sort', 'max_size':'size'}.get(prop, prop) def func(self, index, index_is_id=False): return self.get_property(index, index_is_id=index_is_id, loc=self.FIELD_MAP[fm]) return func setattr(LibraryDatabase, prop, MT(getter(prop))) for prop in ('series', 'publisher'): def getter(field): def func(self, index, index_is_id=False): book_id = index if index_is_id else self.id(index) ans = 
self.new_api.field_ids_for(field, book_id) try: return ans[0] except IndexError: pass return func setattr(LibraryDatabase, prop + '_id', MT(getter(prop))) LibraryDatabase.format_hash = MT(lambda self, book_id, fmt:self.new_api.format_hash(book_id, fmt)) LibraryDatabase.index = MT(lambda self, book_id, cache=False:self.data.id_to_index(book_id)) LibraryDatabase.has_cover = MT(lambda self, book_id:self.new_api.field_for('cover', book_id)) LibraryDatabase.get_tags = MT(lambda self, book_id:set(self.new_api.field_for('tags', book_id))) LibraryDatabase.get_categories = MT(lambda self, sort='name', ids=None, icon_map=None:self.new_api.get_categories(sort=sort, book_ids=ids, icon_map=icon_map)) LibraryDatabase.get_identifiers = MT( lambda self, index, index_is_id=False: self.new_api.field_for('identifiers', index if index_is_id else self.id(index))) LibraryDatabase.isbn = MT( lambda self, index, index_is_id=False: self.get_identifiers(index, index_is_id=index_is_id).get('isbn', None)) LibraryDatabase.get_books_for_category = MT( lambda self, category, id_:self.new_api.get_books_for_category(category, id_)) LibraryDatabase.get_data_as_dict = MT(get_data_as_dict) LibraryDatabase.find_identical_books = MT(lambda self, mi:self.new_api.find_identical_books(mi)) LibraryDatabase.get_top_level_move_items = MT(lambda self:self.new_api.get_top_level_move_items()) # }}} # Legacy setter API {{{ for field in ( '!authors', 'author_sort', 'comment', 'has_cover', 'identifiers', 'languages', 'pubdate', '!publisher', 'rating', '!series', 'series_index', 'timestamp', 'uuid', 'title', 'title_sort', ): def setter(field): has_case_change = field.startswith('!') field = {'comment':'comments', 'title_sort':'sort'}.get(field, field) if has_case_change: field = field[1:] acc = field == 'series' def func(self, book_id, val, notify=True, commit=True, allow_case_change=acc): ret = self.new_api.set_field(field, {book_id:val}, allow_case_change=allow_case_change) if notify: self.notify([book_id]) 
return ret elif field == 'has_cover': def func(self, book_id, val): self.new_api.set_field('cover', {book_id:bool(val)}) else: null_field = field in {'title', 'sort', 'uuid'} retval = (True if field == 'sort' else None) def func(self, book_id, val, notify=True, commit=True): if not val and null_field: return (False if field == 'sort' else None) ret = self.new_api.set_field(field, {book_id:val}) if notify: self.notify([book_id]) return ret if field == 'languages' else retval return func setattr(LibraryDatabase, 'set_%s' % field.replace('!', ''), MT(setter(field))) for field in ('authors', 'tags', 'publisher'): def renamer(field): def func(self, old_id, new_name): id_map = self.new_api.rename_items(field, {old_id:new_name})[1] if field == 'authors': return id_map[old_id] return func fname = field[:-1] if field in {'tags', 'authors'} else field setattr(LibraryDatabase, 'rename_%s' % fname, MT(renamer(field))) LibraryDatabase.update_last_modified = MT( lambda self, book_ids, commit=False, now=None: self.new_api.update_last_modified(book_ids, now=now)) # }}} # Legacy API to get information about many-(one, many) fields {{{ for field in ('authors', 'tags', 'publisher', 'series'): def getter(field): def func(self): return self.new_api.all_field_names(field) return func name = field[:-1] if field in {'authors', 'tags'} else field setattr(LibraryDatabase, 'all_%s_names' % name, MT(getter(field))) LibraryDatabase.all_formats = MT(lambda self:self.new_api.all_field_names('formats')) LibraryDatabase.all_custom = MT(lambda self, label=None, num=None:self.new_api.all_field_names(self.custom_field_name(label, num))) for func, field in {'all_authors':'authors', 'all_titles':'title', 'all_tags2':'tags', 'all_series':'series', 'all_publishers':'publisher'}.iteritems(): def getter(field): def func(self): return self.field_id_map(field) return func setattr(LibraryDatabase, func, MT(getter(field))) LibraryDatabase.all_tags = MT(lambda self: list(self.all_tag_names())) 
LibraryDatabase.get_all_identifier_types = MT(lambda self: list(self.new_api.fields['identifiers'].table.all_identifier_types())) LibraryDatabase.get_authors_with_ids = MT( lambda self: [[aid, adata['name'], adata['sort'], adata['link']] for aid, adata in self.new_api.author_data().iteritems()]) LibraryDatabase.get_author_id = MT( lambda self, author: {icu_lower(v):k for k, v in self.new_api.get_id_map('authors').iteritems()}.get(icu_lower(author), None)) for field in ('tags', 'series', 'publishers', 'ratings', 'languages'): def getter(field): fname = field[:-1] if field in {'publishers', 'ratings'} else field def func(self): return [[tid, tag] for tid, tag in self.new_api.get_id_map(fname).iteritems()] return func setattr(LibraryDatabase, 'get_%s_with_ids' % field, MT(getter(field))) for field in ('author', 'tag', 'series'): def getter(field): field = field if field == 'series' else (field+'s') def func(self, item_id): return self.new_api.get_item_name(field, item_id) return func setattr(LibraryDatabase, '%s_name' % field, MT(getter(field))) for field in ('publisher', 'series', 'tag'): def getter(field): fname = 'tags' if field == 'tag' else field def func(self, item_id): self.new_api.remove_items(fname, (item_id,)) return func setattr(LibraryDatabase, 'delete_%s_using_id' % field, MT(getter(field))) # }}} # Legacy field API {{{ for func in ( 'standard_field_keys', '!custom_field_keys', 'all_field_keys', 'searchable_fields', 'sortable_field_keys', 'search_term_to_field_key', '!custom_field_metadata', 'all_metadata'): def getter(func): if func.startswith('!'): func = func[1:] def meth(self, include_composites=True): return getattr(self.field_metadata, func)(include_composites=include_composites) elif func == 'search_term_to_field_key': def meth(self, term): return self.field_metadata.search_term_to_field_key(term) else: def meth(self): return getattr(self.field_metadata, func)() return meth setattr(LibraryDatabase, func.replace('!', ''), MT(getter(func))) 
LibraryDatabase.metadata_for_field = MT(lambda self, field:self.field_metadata.get(field)) # }}} # Miscellaneous API {{{ for meth in ('get_next_series_num_for', 'has_book', 'author_sort_from_authors'): def getter(meth): def func(self, x): return getattr(self.new_api, meth)(x) return func setattr(LibraryDatabase, meth, MT(getter(meth))) LibraryDatabase.move_library_to = MT(lambda self, newloc, progress=None:self.new_api.move_library_to(newloc, progress=progress)) LibraryDatabase.saved_search_names = MT(lambda self:self.new_api.saved_search_names()) LibraryDatabase.saved_search_lookup = MT(lambda self, x:self.new_api.saved_search_lookup(x)) LibraryDatabase.saved_search_set_all = MT(lambda self, smap:self.new_api.saved_search_set_all(smap)) LibraryDatabase.saved_search_delete = MT(lambda self, x:self.new_api.saved_search_delete(x)) LibraryDatabase.saved_search_add = MT(lambda self, x, y:self.new_api.saved_search_add(x, y)) LibraryDatabase.saved_search_rename = MT(lambda self, x, y:self.new_api.saved_search_rename(x, y)) LibraryDatabase.commit_dirty_cache = MT(lambda self: self.new_api.commit_dirty_cache()) # Cleaning is not required anymore LibraryDatabase.clean = LibraryDatabase.clean_custom = MT(lambda self:None) LibraryDatabase.clean_standard_field = MT(lambda self, field, commit=False:None) # apsw operates in autocommit mode LibraryDatabase.commit = MT(lambda self:None) # }}} del MT
gpl-3.0
tobegit3hub/cinder_docker
cinder/db/sqlalchemy/migrate_repo/versions/023_add_expire_reservations_index.py
20
1526
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from sqlalchemy import Index, MetaData, Table

# Name of the index this migration manages, and its column set in the
# canonical (sorted) order used for comparisons.
_INDEX_NAME = 'reservations_deleted_expire_idx'
_INDEX_MEMBERS = sorted(['deleted', 'expire'])


def _get_deleted_expire_index(table):
    """Return the existing index over (deleted, expire) on *table*, or None."""
    for existing_index in table.indexes:
        if sorted(existing_index.columns.keys()) == _INDEX_MEMBERS:
            return existing_index
    return None


def upgrade(migrate_engine):
    """Create the (deleted, expire) index on the reservations table.

    The index supports the expire_reservations query from
    cinder/db/sqlalchemy/api.py.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    reservations = Table('reservations', meta, autoload=True)
    if _get_deleted_expire_index(reservations) is not None:
        # An equivalent index already exists (e.g. created by hand);
        # creating it again would fail, so do nothing.
        return

    Index(_INDEX_NAME,
          reservations.c.deleted,
          reservations.c.expire).create(migrate_engine)


def downgrade(migrate_engine):
    """Drop the (deleted, expire) index from the reservations table, if any."""
    meta = MetaData()
    meta.bind = migrate_engine

    reservations = Table('reservations', meta, autoload=True)
    existing = _get_deleted_expire_index(reservations)
    if existing:
        existing.drop(migrate_engine)
apache-2.0
NorfolkDataSci/presentations
2018-01_chatbot/serverless-chatbots-workshop-master/LambdaFunctions/nlp/nltk/tag/api.py
7
2800
# Natural Language Toolkit: Tagger Interface
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
#         Steven Bird <stevenbird1@gmail.com> (minor additions)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Interface for tagging each token in a sentence with supplementary
information, such as its part of speech.
"""
from nltk.internals import overridden
from nltk.metrics import accuracy

from nltk.tag.util import untag


class TaggerI(object):
    """
    A processing interface for assigning a tag to each token in a list.
    Tags are case sensitive strings that identify some property of each
    token, such as its part of speech or its sense.

    Some taggers require specific types for their tokens.  This is
    generally indicated by the use of a sub-interface to ``TaggerI``.
    For example, featureset taggers, which are subclassed from
    ``FeaturesetTagger``, require that each token be a ``featureset``.

    Subclasses must define:
      - either ``tag()`` or ``tag_sents()`` (or both)
    """

    def tag(self, tokens):
        """
        Determine the most appropriate tag sequence for the given
        token sequence, and return a corresponding list of tagged
        tokens.  A tagged token is encoded as a tuple ``(token, tag)``.

        :rtype: list(tuple(str, str))
        """
        # Delegate to tag_sents() when a subclass provides it; a subclass
        # must override at least one of the two methods.
        if not overridden(self.tag_sents):
            raise NotImplementedError()
        return self.tag_sents([tokens])[0]

    def tag_sents(self, sentences):
        """
        Apply ``self.tag()`` to each element of *sentences*.  I.e.:

            return [self.tag(sent) for sent in sentences]
        """
        return [self.tag(sentence) for sentence in sentences]

    def evaluate(self, gold):
        """
        Score the accuracy of the tagger against the gold standard.
        Strip the tags from the gold standard text, retag it using
        the tagger, then compute the accuracy score.

        :type gold: list(list(tuple(str, str)))
        :param gold: The list of tagged sentences to score the tagger on.
        :rtype: float
        """
        retagged = self.tag_sents(untag(sentence) for sentence in gold)
        # Flatten both corpora into single token lists before scoring.
        reference_tokens = sum(gold, [])
        predicted_tokens = sum(retagged, [])
        return accuracy(reference_tokens, predicted_tokens)

    def _check_params(self, train, model):
        # Exactly one of *train* and *model* may be provided.
        if bool(train) == bool(model):
            raise ValueError('Must specify either training data or trained model.')


class FeaturesetTaggerI(TaggerI):
    """
    A tagger that requires tokens to be ``featuresets``.  A featureset
    is a dictionary that maps from feature names to feature
    values.  See ``nltk.classify`` for more information about features
    and featuresets.
    """
mit
nitzmahone/tower-cli
lib/tower_cli/api.py
2
8189
# Copyright 2014, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import copy
import functools
import json
import warnings

from requests.exceptions import ConnectionError
from requests.sessions import Session
from requests.models import Response

from tower_cli.conf import settings
from tower_cli.utils import data_structures, debug, exceptions as exc


class Client(Session):
    """An HTTP client for the Ansible Tower API.

    This is a thin wrapper around ``requests.Session``; interact with the
    responses it returns exactly as you would with objects from
    ``requests.get`` and friends.  The one difference is that JSON key
    order is preserved (see ``APIResponse``).
    """

    def __init__(self):
        super(Client, self).__init__()
        # Retry transient network failures a few times before giving up.
        for adapter in self.adapters.values():
            adapter.max_retries = 3

    @property
    def prefix(self):
        """Return the API URL prefix derived from the configured host.

        A bare hostname (no scheme) is assumed to be HTTPS.
        """
        host = settings.host
        if '://' not in host:
            host = 'https://%s' % host.strip('/')
        return '%s/api/v1/' % host.rstrip('/')

    @functools.wraps(Session.request)
    def request(self, method, url, *args, **kwargs):
        """Make a request to the Ansible Tower API and return the response.

        Raises a tower_cli exception subclass for any 4xx/5xx status, and
        a ``ConnectionError`` with a friendly message when the host is
        unreachable.
        """
        # Build the absolute URL from the configured host prefix.
        url = '%s%s' % (self.prefix, url.lstrip('/'))

        # Tower expects authenticated requests; fall back to the
        # credentials from settings when the caller supplied none.
        kwargs.setdefault('auth', (settings.username, settings.password))

        # Write methods send JSON by default, so the content type does not
        # have to be repeated at every call site.
        headers = kwargs.get('headers', {})
        if method.upper() in ('PATCH', 'POST', 'PUT'):
            headers.setdefault('Content-Type', 'application/json')
        kwargs['headers'] = headers

        # When debugging is on, describe the outgoing request.
        debug.log('%s %s' % (method, url), fg='blue', bold=True)
        if method in ('POST', 'PUT', 'PATCH'):
            debug.log('Data: %s' % kwargs.get('data', {}),
                      fg='blue', bold=True)
        if method == 'GET' or kwargs.get('params', None):
            debug.log('Params: %s' % kwargs.get('params', {}),
                      fg='blue', bold=True)
        debug.log('')

        # JSON requests carry an encoded body.
        if headers.get('Content-Type', '') == 'application/json':
            kwargs['data'] = json.dumps(kwargs.get('data', {}))

        try:
            # NOTE(review): verify=False disables TLS certificate
            # verification (hence the suppressed warnings); confirm this
            # is intentional before use on untrusted networks.
            with warnings.catch_warnings():
                response = super(Client, self).request(
                    method, url, *args, verify=False, **kwargs)
        except ConnectionError as ex:
            if settings.verbose:
                debug.log('Cannot connect to Tower:', fg='yellow', bold=True)
                debug.log(str(ex), fg='yellow', bold=True, nl=2)
            raise exc.ConnectionError(
                'There was a network error of some kind trying to connect '
                'to Tower.\n\nThe most common reason for this is a settings '
                'issue; is your "host" value in `tower-cli config` correct?\n'
                'Right now it is: "%s".' % settings.host
            )

        # Map HTTP failure classes onto tower-cli exceptions, checking the
        # most severe conditions first.
        if response.status_code >= 500:
            raise exc.ServerError('The Tower server sent back a server error. '
                                  'Please try again later.')
        if response.status_code == 401:
            raise exc.AuthError('Invalid Tower authentication credentials.')
        if response.status_code == 403:
            raise exc.Forbidden("You don't have permission to do that.")
        # Requests with primary keys will return a 404 if there is no
        # matching object; trap these consistently.
        if response.status_code == 404:
            raise exc.NotFound('The requested object could not be found.')
        # A 405 gets special wording because the API also sends it for
        # logic errors (e.g. trying to cancel a job that isn't running).
        if response.status_code == 405:
            raise exc.MethodNotAllowed(
                "The Tower server says you can't make a request with the "
                "%s method to that URL (%s)." % (method, url),
            )
        if response.status_code >= 400:
            raise exc.BadRequest(
                'The Tower server claims it was sent a bad request.\n\n'
                '%s %s\nParams: %s\nData: %s\n\nResponse: %s'
                % (method, url, kwargs.get('params', None),
                   kwargs.get('data', None), response.content.decode('utf8'))
            )

        # Django REST Framework deliberately orders JSON keys; preserve
        # that ordering by swapping in the APIResponse subclass, whose
        # json() method keeps key order.
        response.__class__ = APIResponse
        return response

    @property
    @contextlib.contextmanager
    def test_mode(self):
        """Temporarily replace the HTTP adapters with a
        fauxquests.FauxAdapter, turning this client into a faux client.
        """
        # Imported here so fauxquests is only required for testing.
        from fauxquests.adapter import FauxAdapter

        with settings.runtime_values(host='20.12.4.21',
                                     username='meagan',
                                     password='This is the best wine.',
                                     verbose=False,
                                     format='json'):
            saved_adapters = copy.copy(self.adapters)
            faux_adapter = FauxAdapter(
                url_pattern=self.prefix.rstrip('/') + '%s',
            )
            try:
                self.adapters.clear()
                self.mount('https://', faux_adapter)
                self.mount('http://', faux_adapter)
                yield faux_adapter
            finally:
                # Always restore the real adapters, even on error.
                self.adapters = saved_adapters


class APIResponse(Response):
    """A Response subclass whose ``json()`` preserves key order; it makes
    no other changes.
    """

    def json(self, **kwargs):
        kwargs.setdefault('object_pairs_hook', data_structures.OrderedDict)
        return super(APIResponse, self).json(**kwargs)


client = Client()
apache-2.0
rspc/mmc-sd40
tools/perf/util/setup.py
766
1540
#!/usr/bin/python2

from distutils.core import setup, Extension
from os import getenv

from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib


class build_ext(_build_ext):
    """build_ext that redirects output into the externally supplied
    build directories (PYTHON_EXTBUILD_LIB/PYTHON_EXTBUILD_TMP)."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp


class install_lib(_install_lib):
    """install_lib that installs from the external build directory."""
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib


cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter']

# Build locations and prebuilt libraries are handed in via the
# environment by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
libapikfs = getenv('LIBAPI')

# Read the list of C sources, skipping blank lines and '#' comments.
# Use open() in a with-block instead of the Python-2-only file() builtin,
# which also guarantees the file handle is closed.
with open('util/python-ext-sources') as sources_file:
    ext_sources = [f.strip() for f in sources_file
                   if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                 extra_objects = [libtraceevent, libapikfs],
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
gpl-2.0
0x90sled/catapult
third_party/apiclient/googleapiclient/errors.py
25
3511
# Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Errors for the library. All exceptions defined by the library should be defined in this file. """ from __future__ import absolute_import __author__ = 'jcgregorio@google.com (Joe Gregorio)' import json from oauth2client import util class Error(Exception): """Base error for this module.""" pass class HttpError(Error): """HTTP data was invalid or unexpected.""" @util.positional(3) def __init__(self, resp, content, uri=None): self.resp = resp self.content = content self.uri = uri def _get_reason(self): """Calculate the reason for the error from the response content.""" reason = self.resp.reason try: data = json.loads(self.content) reason = data['error']['message'] except (ValueError, KeyError): pass if reason is None: reason = '' return reason def __repr__(self): if self.uri: return '<HttpError %s when requesting %s returned "%s">' % ( self.resp.status, self.uri, self._get_reason().strip()) else: return '<HttpError %s "%s">' % (self.resp.status, self._get_reason()) __str__ = __repr__ class InvalidJsonError(Error): """The JSON returned could not be parsed.""" pass class UnknownFileType(Error): """File type unknown or unexpected.""" pass class UnknownLinkType(Error): """Link type unknown or unexpected.""" pass class UnknownApiNameOrVersion(Error): """No API with that name and version exists.""" pass class UnacceptableMimeTypeError(Error): """That is an unacceptable mimetype for this 
operation.""" pass class MediaUploadSizeError(Error): """Media is larger than the method can accept.""" pass class ResumableUploadError(HttpError): """Error occured during resumable upload.""" pass class InvalidChunkSizeError(Error): """The given chunksize is not valid.""" pass class InvalidNotificationError(Error): """The channel Notification is invalid.""" pass class BatchError(HttpError): """Error occured during batch operations.""" @util.positional(2) def __init__(self, reason, resp=None, content=None): self.resp = resp self.content = content self.reason = reason def __repr__(self): return '<BatchError %s "%s">' % (self.resp.status, self.reason) __str__ = __repr__ class UnexpectedMethodError(Error): """Exception raised by RequestMockBuilder on unexpected calls.""" @util.positional(1) def __init__(self, methodId=None): """Constructor for an UnexpectedMethodError.""" super(UnexpectedMethodError, self).__init__( 'Received unexpected call %s' % methodId) class UnexpectedBodyError(Error): """Exception raised by RequestMockBuilder on unexpected bodies.""" def __init__(self, expected, provided): """Constructor for an UnexpectedMethodError.""" super(UnexpectedBodyError, self).__init__( 'Expected: [%s] - Provided: [%s]' % (expected, provided))
bsd-3-clause
CamelBackNotation/CarnotKE
jyhton/lib-python/2.7/ctypes/test/test_internals.py
109
2623
# This tests the internal _objects attribute
import unittest
from ctypes import *
from sys import getrefcount as grc

# XXX This test must be reviewed for correctness!!!

"""
ctypes' types are container types.

They have an internal memory block, which only consists of some bytes,
but it has to keep references to other objects as well. This is not
really needed for trivial C types like int or char, but it is important
for aggregate types like strings or pointers in particular.

What about pointers?
"""


class ObjectsTestCase(unittest.TestCase):
    def assertSame(self, a, b):
        # Identity check: both names must refer to the very same object.
        self.assertEqual(id(a), id(b))

    def test_ints(self):
        value = 42000123
        refs_before = grc(value)
        boxed = c_int(value)
        # Plain ints are copied into the c_int, so no reference is kept.
        self.assertEqual(refs_before, grc(value))
        self.assertEqual(boxed._objects, None)

    def test_c_char_p(self):
        text = "Hello, World"
        refs_before = grc(text)
        boxed = c_char_p(text)
        # The c_char_p keeps the string alive via _objects.
        self.assertEqual(refs_before + 1, grc(text))
        self.assertSame(boxed._objects, text)

    def test_simple_struct(self):
        class X(Structure):
            _fields_ = [("a", c_int), ("b", c_int)]

        a = 421234
        b = 421235
        x = X()
        self.assertEqual(x._objects, None)
        x.a = a
        x.b = b
        # Scalar fields are stored by value; nothing is referenced.
        self.assertEqual(x._objects, None)

    def test_embedded_structs(self):
        class X(Structure):
            _fields_ = [("a", c_int), ("b", c_int)]

        class Y(Structure):
            _fields_ = [("x", X), ("y", X)]

        y = Y()
        self.assertEqual(y._objects, None)

        x1, x2 = X(), X()
        y.x, y.y = x1, x2
        # Each embedded struct slot gets an (empty) tracking dict.
        self.assertEqual(y._objects, {"0": {}, "1": {}})
        x1.a, x2.b = 42, 93
        self.assertEqual(y._objects, {"0": {}, "1": {}})

    def test_xxx(self):
        class X(Structure):
            _fields_ = [("a", c_char_p), ("b", c_char_p)]

        class Y(Structure):
            _fields_ = [("x", X), ("y", X)]

        s1 = "Hello, World"
        s2 = "Hallo, Welt"

        x = X()
        x.a = s1
        x.b = s2
        self.assertEqual(x._objects, {"0": s1, "1": s2})

        y = Y()
        y.x = x
        # Assigning a struct copies its _objects dict one level down.
        self.assertEqual(y._objects, {"0": {"0": s1, "1": s2}})
##        x = y.x
##        del y
##        print x._b_base_._objects

    def test_ptr_struct(self):
        class X(Structure):
            _fields_ = [("data", POINTER(c_int))]

        A = c_int*4
        a = A(11, 22, 33, 44)
        self.assertEqual(a._objects, None)

        x = X()
        x.data = a
##XXX    print x._objects
##XXX    print x.data[0]
##XXX    print x.data._objects

if __name__ == '__main__':
    unittest.main()
apache-2.0
mtpajula/ijonmap
core/project.py
1
4348
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from .elements.point import Point
from .elements.line import Line
from .elements.polygon import Polygon
import os


class Project(object):
    """In-memory container for map elements (points, lines, polygons).

    Tracks a title/filepath, a dirty flag (``saved``) and reports each
    mutation through the injected ``messages`` object.
    """

    def __init__(self, messages):
        self.messages = messages
        self.points = []
        self.lines = []
        self.polygons = []
        self.title = None
        self.filepath = None
        self.users = []
        self.saved = True    # False once any element is added/edited/removed
        self.draw = True

    def get_title(self):
        """Return the project title, the filepath, or '...' as a placeholder."""
        if self.title is not None:
            return self.title
        if self.filepath is not None:
            # NOTE(review): splitext + concatenation reproduces the
            # filepath unchanged — possibly meant to strip the directory
            # or the extension; confirm the intent.
            filename, file_extension = os.path.splitext(self.filepath)
            return filename + file_extension
        return '...'

    def is_empty(self):
        """Return True if the project holds no elements of any type."""
        if len(self.points) > 0:
            return False
        if len(self.lines) > 0:
            return False
        if len(self.polygons) > 0:
            return False
        return True

    def get(self, element_type):
        """Return the element list for *element_type*, or False if unknown."""
        if element_type == 'point':
            return self.points
        elif element_type == 'line':
            return self.lines
        elif element_type == 'polygon':
            return self.polygons
        else:
            return False

    def get_id(self, element_type, id):
        """Return the element of *element_type* with the given id, or False."""
        for element in self.get(element_type):
            if element.id == id:
                return element
        return False

    def new_point(self):
        return Point()

    def new_line(self):
        return Line()

    def new_polygon(self):
        return Polygon()

    def new(self, element_type):
        """Construct a fresh, unattached element of *element_type* (False if unknown)."""
        if element_type == 'point':
            return self.new_point()
        elif element_type == 'line':
            return self.new_line()
        elif element_type == 'polygon':
            return self.new_polygon()
        else:
            return False

    def save(self, element, show_ok_message=True):
        """Validate *element* and append it to its type list.

        Returns False (and reports via messages) when validation fails.
        """
        if show_ok_message:
            m = self.messages.add("save " + element.type, "Project")
        if element.is_valid() is not True:
            self.messages.set_message_status(m, False, element.type + " is not valid")
            return False
        self.get(element.type).append(element)
        if show_ok_message:
            self.messages.set_message_status(m, True)
        self.saved = False
        return True

    def edit(self, element):
        """Record that *element* was edited and mark the project dirty."""
        m = self.messages.add("edit " + element.type, "Project")
        self.messages.set_message_status(m, True)
        self.saved = False
        return True

    def delete(self, element):
        """Remove *element* from its type list, if present."""
        m = self.messages.add("delete " + element.type, "Project")
        elements = self.get(element.type)
        if element in elements:
            elements.remove(element)
        self.messages.set_message_status(m, True)
        self.saved = False
        return True

    def is_in_range(self, elements, num):
        """Return True if *num* is a valid index into *elements*."""
        try:
            elements[num]
            return True
        # BUGFIX: this used a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; catch only lookup failures.
        except (IndexError, KeyError, TypeError):
            return False

    def get_dictionary(self):
        """Serialize the project (title plus all elements) to a plain dict."""
        return {
            'data': {'title': self.title},
            'points': [point.get_dictionary() for point in self.points],
            'lines': [line.get_dictionary() for line in self.lines],
            'polygons': [polygon.get_dictionary() for polygon in self.polygons],
        }

    def set_dictionary(self, d):
        """Load elements and metadata from a dict produced by get_dictionary.

        NOTE(review): assumes the 'points'/'lines'/'polygons' keys are
        always present — confirm against all callers.
        """
        if 'data' in d:
            if 'title' in d['data']:
                self.title = d['data']['title']

        for data in d['points']:
            p = self.new_point()
            p.set_dictionary(data)
            self.get('point').append(p)

        for data in d['lines']:
            l = self.new_line()
            l.set_dictionary(data)
            self.get('line').append(l)

        for data in d['polygons']:
            pl = self.new_polygon()
            pl.set_dictionary(data)
            self.get('polygon').append(pl)
gpl-2.0
twiest/openshift-tools
openshift/installer/vendored/openshift-ansible-3.2.24/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
91
5294
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
"""Ansible module for modifying OpenShift configs during an upgrade"""

import os
import yaml

DOCUMENTATION = '''
---
module: openshift_upgrade_config
short_description: OpenShift Upgrade Config
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''


def modify_api_levels(level_list, remove, ensure, msg_prepend='',
                      msg_append=''):
    """Remove unwanted and ensure required entries in an API level list.

    Returns a dict with the resulting list ('new_list'), whether anything
    changed ('changed') and human-readable change descriptions ('changes').
    """
    changed = False
    changes = []

    if not isinstance(remove, list):
        remove = []

    if not isinstance(ensure, list):
        ensure = []

    if not isinstance(level_list, list):
        # Config had no (or a malformed) list; start fresh.
        new_list = []
        changed = True
        changes.append("%s created missing %s" % (msg_prepend, msg_append))
    else:
        new_list = level_list
        for level in remove:
            if level in new_list:
                new_list.remove(level)
                changed = True
                changes.append("%s removed %s %s" % (msg_prepend, level, msg_append))

    for level in ensure:
        if level not in new_list:
            new_list.append(level)
            changed = True
            changes.append("%s added %s %s" % (msg_prepend, level, msg_append))

    return {'new_list': new_list, 'changed': changed, 'changes': changes}


def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
    """Main upgrade method for 3.0 to 3.1."""
    changes = []

    # Facts do not get transferred to the hosts where custom modules run,
    # need to make some assumptions here.
    master_config = os.path.join(config_base, 'master/master-config.yaml')

    # Use a context manager so the handle is closed even on parse errors.
    with open(master_config, 'r') as master_cfg_file:
        config = yaml.safe_load(master_cfg_file.read())

    # Remove unsupported api versions and ensure supported api versions from
    # master config
    unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3']
    supported_levels = ['v1']

    result = modify_api_levels(config.get('apiLevels'), unsupported_levels,
                               supported_levels, 'master-config.yaml:', 'from apiLevels')
    if result['changed']:
        config['apiLevels'] = result['new_list']
        # NOTE(review): this appends the list of change strings as a single
        # element (changes becomes nested); only len(changes) is consumed
        # below, so behavior is preserved — confirm before flattening.
        changes.append(result['changes'])

    if 'kubernetesMasterConfig' in config and 'apiLevels' in config['kubernetesMasterConfig']:
        config['kubernetesMasterConfig'].pop('apiLevels')
        changes.append('master-config.yaml: removed kubernetesMasterConfig.apiLevels')

    # Add masterCA to serviceAccountConfig
    if 'serviceAccountConfig' in config and 'masterCA' not in config['serviceAccountConfig']:
        config['serviceAccountConfig']['masterCA'] = config['oauthConfig'].get('masterCA', 'ca.crt')

    # Add proxyClientInfo to master-config
    if 'proxyClientInfo' not in config['kubernetesMasterConfig']:
        config['kubernetesMasterConfig']['proxyClientInfo'] = {
            'certFile': 'master.proxy-client.crt',
            'keyFile': 'master.proxy-client.key'
        }
        changes.append("master-config.yaml: added proxyClientInfo")

    if len(changes) > 0:
        if backup:
            # TODO: Check success:
            ansible_module.backup_local(master_config)

        # Write the modified config:
        with open(master_config, 'w') as out_file:
            out_file.write(yaml.safe_dump(config, default_flow_style=False))

    return changes


def upgrade_master(ansible_module, config_base, from_version, to_version, backup):
    """Upgrade entry point."""
    if from_version == '3.0':
        if to_version == '3.1':
            return upgrade_master_3_0_to_3_1(ansible_module, config_base, backup)


def main():
    """ main """
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name,
    # redefined-outer-name
    global module

    module = AnsibleModule(
        argument_spec=dict(
            config_base=dict(required=True),
            from_version=dict(required=True, choices=['3.0']),
            to_version=dict(required=True, choices=['3.1']),
            role=dict(required=True, choices=['master']),
            backup=dict(required=False, default=True, type='bool')
        ),
        supports_check_mode=True,
    )

    from_version = module.params['from_version']
    to_version = module.params['to_version']
    role = module.params['role']
    backup = module.params['backup']
    config_base = module.params['config_base']

    try:
        changes = []
        if role == 'master':
            changes = upgrade_master(module, config_base,
                                     from_version, to_version, backup)

        changed = len(changes) > 0
        return module.exit_json(changed=changed, changes=changes)

    # ignore broad-except error to avoid stack trace to ansible user
    # pylint: disable=broad-except
    # BUGFIX: use the 'except ... as ...' form (valid on Python 2.6+ and
    # required on Python 3) instead of the py2-only comma form.
    except Exception as e:
        return module.fail_json(msg=str(e))

# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
apache-2.0
neuroo/equip
tests/test_imports.py
2
1461
import pytest

from testutils import get_co, get_bytecode

import equip
from equip.bytecode.utils import show_bytecode

IMPORTS_CODE = """
from __future__ import absolute_import
from .test_import import *
import sub
from . import sub
from .... import sub
import foo.bar
from foo import bar
import foo.bar as bar
from . import foo
import foo
from .. import bar
import bar
from ... import bar
from .... import bar
from foo import bar as baz
from foo import bar, baz, fooz
from foo import bar as bar1, baz as baz1, fooz as fooz1
from godot import didi, gogo
"""


def test_imports():
  co_imports = get_co(IMPORTS_CODE)
  import_bc = get_bytecode(co_imports)
  assert co_imports is not None
  assert len(import_bc) > 0

  import_stmts = equip.BytecodeObject.get_imports_from_bytecode(co_imports, import_bc)
  assert len(import_stmts) == 18

  # Each parsed statement should render (via repr) to text containing the
  # corresponding source line, in order.
  source_lines = [line for line in IMPORTS_CODE.split('\n') if line != '']
  for expected_line, stmt in zip(source_lines, import_stmts):
    assert expected_line in repr(stmt)


IMPORT_AS = "import foo as foo"  # same as `import foo`

def test_import_as():
  co_imports = get_co(IMPORT_AS)
  import_bc = get_bytecode(co_imports)
  import_stmts = equip.BytecodeObject.get_imports_from_bytecode(co_imports, import_bc)
  assert len(import_stmts) == 1

  # A self-alias behaves like a plain import: no root, alias maps to None.
  import_stmt = import_stmts[0]
  assert import_stmt.root == None
  assert import_stmt.aliases == [('foo', None)]
apache-2.0
mhaessig/servo
tests/wpt/css-tests/tools/manifest/XMLParser.py
97
4413
from os.path import dirname, join

from collections import OrderedDict
from xml.parsers import expat
import xml.etree.ElementTree as etree

# Directory holding the bundled XHTML DTD used to resolve external entities.
_catalog = join(dirname(__file__), "catalog")

def _wrap_error(e):
    """Re-raise an expat error as an ``etree.ParseError``.

    Copies the expat error code and (line, column) position onto the
    new exception so callers see the same API as the stdlib parser.
    """
    err = etree.ParseError(e)
    err.code = e.code
    err.position = e.lineno, e.offset
    raise err

# Cache of raw expat tag names -> ElementTree-style "{ns}tag" names.
_names = {}
def _fixname(key):
    """Convert an expat name to ElementTree's "{namespace}tag" form, cached."""
    try:
        name = _names[key]
    except KeyError:
        name = key
        # expat (created with namespace_separator="}") emits "ns}tag";
        # prepend "{" to get ElementTree's "{ns}tag" convention.
        if "}" in name:
            name = "{" + name
        _names[key] = name
    return name


class XMLParser(object):
    """
    An XML parser with support for XHTML DTDs and all Python-supported encodings

    This implements the API defined by
    xml.etree.ElementTree.XMLParser, but supports XHTML DTDs
    (therefore allowing XHTML entities) and supports all encodings
    Python does, rather than just those supported by expat.
    """
    def __init__(self, encoding=None):
        # The namespace separator "}" makes expat report names as "ns}tag",
        # which _fixname turns into ElementTree's "{ns}tag".
        self._parser = expat.ParserCreate(encoding, "}")
        self._target = etree.TreeBuilder()
        # parser settings
        self._parser.buffer_text = 1
        self._parser.ordered_attributes = 1
        # Needed so the external XHTML DTD is actually parsed for entities.
        self._parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
        # parser callbacks
        self._parser.XmlDeclHandler = self._xml_decl
        self._parser.StartElementHandler = self._start
        self._parser.EndElementHandler = self._end
        self._parser.CharacterDataHandler = self._data
        self._parser.ExternalEntityRefHandler = self._external
        self._parser.SkippedEntityHandler = self._skipped
        # used for our horrible re-encoding hack: raw input is buffered here
        # until the first start tag, in case expat rejects the encoding and
        # we need to re-decode and re-feed everything as UTF-8 (see feed()).
        self._fed_data = []
        self._read_encoding = None

    def _xml_decl(self, version, encoding, standalone):
        # Remember the declared encoding for the re-encoding hack in feed().
        self._read_encoding = encoding

    def _start(self, tag, attrib_in):
        # Once the first element arrives the encoding must have been accepted,
        # so the raw-input buffer is no longer needed.
        self._fed_data = None
        tag = _fixname(tag)
        attrib = OrderedDict()
        if attrib_in:
            # ordered_attributes=1 gives a flat [name, value, ...] list.
            for i in range(0, len(attrib_in), 2):
                attrib[_fixname(attrib_in[i])] = attrib_in[i+1]
        return self._target.start(tag, attrib)

    def _data(self, text):
        return self._target.data(text)

    def _end(self, tag):
        return self._target.end(_fixname(tag))

    def _external(self, context, base, systemId, publicId):
        """Resolve known XHTML/MathML public IDs against the bundled DTD."""
        if publicId in {
                "-//W3C//DTD XHTML 1.0 Transitional//EN",
                "-//W3C//DTD XHTML 1.1//EN",
                "-//W3C//DTD XHTML 1.0 Strict//EN",
                "-//W3C//DTD XHTML 1.0 Frameset//EN",
                "-//W3C//DTD XHTML Basic 1.0//EN",
                "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN",
                "-//W3C//DTD XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN",
                "-//W3C//DTD MathML 2.0//EN",
                "-//WAPFORUM//DTD XHTML Mobile 1.0//EN"
        }:
            parser = self._parser.ExternalEntityParserCreate(context)
            with open(join(_catalog, "xhtml.dtd"), "rb") as fp:
                try:
                    parser.ParseFile(fp)
                except expat.error:
                    # Returning false makes expat report an external-entity
                    # parsing error to the caller.
                    return False
        # Unknown public IDs are skipped without error.
        return True

    def _skipped(self, name, is_parameter_entity):
        """Raise for entities expat skipped (i.e. undefined entities)."""
        err = expat.error("undefined entity %s: line %d, column %d" %
                          (name, self._parser.ErrorLineNumber,
                           self._parser.ErrorColumnNumber))
        err.code = expat.errors.XML_ERROR_UNDEFINED_ENTITY
        err.lineno = self._parser.ErrorLineNumber
        err.offset = self._parser.ErrorColumnNumber
        raise err

    def feed(self, data):
        """Feed a chunk of data to the parser.

        If expat rejects the document's declared encoding as multi-byte,
        everything buffered so far is re-decoded with that encoding,
        re-encoded as UTF-8, and re-fed through a fresh parser whose
        internals replace this instance's (the re-encoding hack).
        """
        if self._fed_data is not None:
            self._fed_data.append(data)
        try:
            self._parser.Parse(data, False)
        except expat.error as v:
            _wrap_error(v)
        except ValueError as e:
            if e.args[0] == 'multi-byte encodings are not supported':
                # The XML declaration must have been seen for expat to
                # know the encoding it is rejecting.
                assert self._read_encoding is not None
                xml = b"".join(self._fed_data).decode(self._read_encoding).encode("utf-8")

                new_parser = XMLParser("utf-8")
                self._parser = new_parser._parser
                self._target = new_parser._target
                self._fed_data = None
                self.feed(xml)

    def close(self):
        """Finish parsing and return the completed element tree."""
        try:
            self._parser.Parse("", True)
        except expat.error as v:
            _wrap_error(v)
        tree = self._target.close()
        return tree
mpl-2.0
xodus7/tensorflow
tensorflow/contrib/tensor_forest/python/tensor_forest.py
33
26822
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Extremely random forest graph builder. go/brain-tree.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import numbers import random from google.protobuf import text_format from tensorflow.contrib.decision_trees.proto import generic_tree_model_pb2 as _tree_proto from tensorflow.contrib.framework.python.ops import variables as framework_variables from tensorflow.contrib.tensor_forest.proto import tensor_forest_params_pb2 as _params_proto from tensorflow.contrib.tensor_forest.python.ops import data_ops from tensorflow.contrib.tensor_forest.python.ops import model_ops from tensorflow.contrib.tensor_forest.python.ops import stats_ops from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables as tf_variables from tensorflow.python.platform import tf_logging as logging # Stores tuples of (leaf model type, stats model type) CLASSIFICATION_LEAF_MODEL_TYPES = { 'all_dense': (_params_proto.MODEL_DENSE_CLASSIFICATION, _params_proto.STATS_DENSE_GINI), 'all_sparse': 
(_params_proto.MODEL_SPARSE_CLASSIFICATION, _params_proto.STATS_SPARSE_GINI), 'sparse_then_dense': (_params_proto.MODEL_SPARSE_OR_DENSE_CLASSIFICATION, _params_proto.STATS_SPARSE_THEN_DENSE_GINI), } REGRESSION_MODEL_TYPE = ( _params_proto.MODEL_REGRESSION, _params_proto.STATS_LEAST_SQUARES_REGRESSION, _params_proto.COLLECTION_BASIC) FINISH_TYPES = { 'basic': _params_proto.SPLIT_FINISH_BASIC, 'hoeffding': _params_proto.SPLIT_FINISH_DOMINATE_HOEFFDING, 'bootstrap': _params_proto.SPLIT_FINISH_DOMINATE_BOOTSTRAP } PRUNING_TYPES = { 'none': _params_proto.SPLIT_PRUNE_NONE, 'half': _params_proto.SPLIT_PRUNE_HALF, 'quarter': _params_proto.SPLIT_PRUNE_QUARTER, '10_percent': _params_proto.SPLIT_PRUNE_10_PERCENT, 'hoeffding': _params_proto.SPLIT_PRUNE_HOEFFDING, } SPLIT_TYPES = { 'less_or_equal': _tree_proto.InequalityTest.LESS_OR_EQUAL, 'less': _tree_proto.InequalityTest.LESS_THAN } def parse_number_or_string_to_proto(proto, param): if isinstance(param, numbers.Number): proto.constant_value = param else: # assume it's a string if param.isdigit(): proto.constant_value = int(param) else: text_format.Merge(param, proto) def build_params_proto(params): """Build a TensorForestParams proto out of the V4ForestHParams object.""" proto = _params_proto.TensorForestParams() proto.num_trees = params.num_trees proto.max_nodes = params.max_nodes proto.is_regression = params.regression proto.num_outputs = params.num_classes proto.num_features = params.num_features proto.leaf_type = params.leaf_model_type proto.stats_type = params.stats_model_type proto.collection_type = _params_proto.COLLECTION_BASIC proto.pruning_type.type = params.pruning_type proto.finish_type.type = params.finish_type proto.inequality_test_type = params.split_type proto.drop_final_class = False proto.collate_examples = params.collate_examples proto.checkpoint_stats = params.checkpoint_stats proto.use_running_stats_method = params.use_running_stats_method proto.initialize_average_splits = 
params.initialize_average_splits proto.inference_tree_paths = params.inference_tree_paths parse_number_or_string_to_proto(proto.pruning_type.prune_every_samples, params.prune_every_samples) parse_number_or_string_to_proto(proto.finish_type.check_every_steps, params.early_finish_check_every_samples) parse_number_or_string_to_proto(proto.split_after_samples, params.split_after_samples) parse_number_or_string_to_proto(proto.num_splits_to_consider, params.num_splits_to_consider) proto.dominate_fraction.constant_value = params.dominate_fraction if params.param_file: with open(params.param_file) as f: text_format.Merge(f.read(), proto) return proto # A convenience class for holding random forest hyperparameters. # # To just get some good default parameters, use: # hparams = ForestHParams(num_classes=2, num_features=40).fill() # # Note that num_classes can not be inferred and so must always be specified. # Also, either num_splits_to_consider or num_features should be set. # # To override specific values, pass them to the constructor: # hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill() # # TODO(thomaswc): Inherit from tf.HParams when that is publicly available. class ForestHParams(object): """A base class for holding hyperparameters and calculating good defaults.""" def __init__( self, num_trees=100, max_nodes=10000, bagging_fraction=1.0, num_splits_to_consider=0, feature_bagging_fraction=1.0, max_fertile_nodes=0, # deprecated, unused. 
split_after_samples=250, valid_leaf_threshold=1, dominate_method='bootstrap', dominate_fraction=0.99, model_name='all_dense', split_finish_name='basic', split_pruning_name='none', prune_every_samples=0, early_finish_check_every_samples=0, collate_examples=False, checkpoint_stats=False, use_running_stats_method=False, initialize_average_splits=False, inference_tree_paths=False, param_file=None, split_name='less_or_equal', **kwargs): self.num_trees = num_trees self.max_nodes = max_nodes self.bagging_fraction = bagging_fraction self.feature_bagging_fraction = feature_bagging_fraction self.num_splits_to_consider = num_splits_to_consider self.max_fertile_nodes = max_fertile_nodes self.split_after_samples = split_after_samples self.valid_leaf_threshold = valid_leaf_threshold self.dominate_method = dominate_method self.dominate_fraction = dominate_fraction self.model_name = model_name self.split_finish_name = split_finish_name self.split_pruning_name = split_pruning_name self.collate_examples = collate_examples self.checkpoint_stats = checkpoint_stats self.use_running_stats_method = use_running_stats_method self.initialize_average_splits = initialize_average_splits self.inference_tree_paths = inference_tree_paths self.param_file = param_file self.split_name = split_name self.early_finish_check_every_samples = early_finish_check_every_samples self.prune_every_samples = prune_every_samples for name, value in kwargs.items(): setattr(self, name, value) def values(self): return self.__dict__ def fill(self): """Intelligently sets any non-specific parameters.""" # Fail fast if num_classes or num_features isn't set. 
_ = getattr(self, 'num_classes') _ = getattr(self, 'num_features') self.bagged_num_features = int(self.feature_bagging_fraction * self.num_features) self.bagged_features = None if self.feature_bagging_fraction < 1.0: self.bagged_features = [random.sample( range(self.num_features), self.bagged_num_features) for _ in range(self.num_trees)] self.regression = getattr(self, 'regression', False) # Num_outputs is the actual number of outputs (a single prediction for # classification, a N-dimensional point for regression). self.num_outputs = self.num_classes if self.regression else 1 # Add an extra column to classes for storing counts, which is needed for # regression and avoids having to recompute sums for classification. self.num_output_columns = self.num_classes + 1 # Our experiments have found that num_splits_to_consider = num_features # gives good accuracy. self.num_splits_to_consider = self.num_splits_to_consider or min( max(10, math.floor(math.sqrt(self.num_features))), 1000) # If base_random_seed is 0, the current time will be used to seed the # random number generators for each tree. If non-zero, the i-th tree # will be seeded with base_random_seed + i. self.base_random_seed = getattr(self, 'base_random_seed', 0) # How to store leaf models. self.leaf_model_type = ( REGRESSION_MODEL_TYPE[0] if self.regression else CLASSIFICATION_LEAF_MODEL_TYPES[self.model_name][0]) # How to store stats objects. 
self.stats_model_type = ( REGRESSION_MODEL_TYPE[1] if self.regression else CLASSIFICATION_LEAF_MODEL_TYPES[self.model_name][1]) self.finish_type = ( _params_proto.SPLIT_FINISH_BASIC if self.regression else FINISH_TYPES[self.split_finish_name]) self.pruning_type = PRUNING_TYPES[self.split_pruning_name] if self.pruning_type == _params_proto.SPLIT_PRUNE_NONE: self.prune_every_samples = 0 else: if (not self.prune_every_samples and not (isinstance(numbers.Number) or self.split_after_samples.isdigit())): logging.error( 'Must specify prune_every_samples if using a depth-dependent ' 'split_after_samples') # Pruning half-way through split_after_samples seems like a decent # default, making it easy to select the number being pruned with # pruning_type while not paying the cost of pruning too often. Note that # this only holds if not using a depth-dependent split_after_samples. self.prune_every_samples = (self.prune_every_samples or int(self.split_after_samples) / 2) if self.finish_type == _params_proto.SPLIT_FINISH_BASIC: self.early_finish_check_every_samples = 0 else: if (not self.early_finish_check_every_samples and not (isinstance(numbers.Number) or self.split_after_samples.isdigit())): logging.error( 'Must specify prune_every_samples if using a depth-dependent ' 'split_after_samples') # Checking for early finish every quarter through split_after_samples # seems like a decent default. We don't want to incur the checking cost # too often, but (at least for hoeffding) it's lower than the cost of # pruning so we can do it a little more frequently. 
self.early_finish_check_every_samples = ( self.early_finish_check_every_samples or int(self.split_after_samples) / 4) self.split_type = SPLIT_TYPES[self.split_name] return self def get_epoch_variable(): """Returns the epoch variable, or [0] if not defined.""" # Grab epoch variable defined in # //third_party/tensorflow/python/training/input.py::limit_epochs for v in tf_variables.local_variables(): if 'limit_epochs/epoch' in v.op.name: return array_ops.reshape(v, [1]) # TODO(thomaswc): Access epoch from the data feeder. return [0] # A simple container to hold the training variables for a single tree. class TreeVariables(object): """Stores tf.Variables for training a single random tree. Uses tf.get_variable to get tree-specific names so that this can be used with a tf.learn-style implementation (one that trains a model, saves it, then relies on restoring that model to evaluate). """ def __init__(self, params, tree_num, training, tree_config='', tree_stat=''): if (not hasattr(params, 'params_proto') or not isinstance(params.params_proto, _params_proto.TensorForestParams)): params.params_proto = build_params_proto(params) params.serialized_params_proto = params.params_proto.SerializeToString() self.stats = None if training: # TODO(gilberth): Manually shard this to be able to fit it on # multiple machines. self.stats = stats_ops.fertile_stats_variable( params, tree_stat, self.get_tree_name('stats', tree_num)) self.tree = model_ops.tree_variable( params, tree_config, self.stats, self.get_tree_name('tree', tree_num)) def get_tree_name(self, name, num): return '{0}-{1}'.format(name, num) class ForestVariables(object): """A container for a forests training data, consisting of multiple trees. Instantiates a TreeVariables object for each tree. We override the __getitem__ and __setitem__ function so that usage looks like this: forest_variables = ForestVariables(params) ... forest_variables.tree ... 
""" def __init__(self, params, device_assigner, training=True, tree_variables_class=TreeVariables, tree_configs=None, tree_stats=None): self.variables = [] # Set up some scalar variables to run through the device assigner, then # we can use those to colocate everything related to a tree. self.device_dummies = [] with ops.device(device_assigner): for i in range(params.num_trees): self.device_dummies.append(variable_scope.get_variable( name='device_dummy_%d' % i, shape=0)) for i in range(params.num_trees): with ops.device(self.device_dummies[i].device): kwargs = {} if tree_configs is not None: kwargs.update(dict(tree_config=tree_configs[i])) if tree_stats is not None: kwargs.update(dict(tree_stat=tree_stats[i])) self.variables.append(tree_variables_class( params, i, training, **kwargs)) def __setitem__(self, t, val): self.variables[t] = val def __getitem__(self, t): return self.variables[t] class RandomForestGraphs(object): """Builds TF graphs for random forest training and inference.""" def __init__(self, params, tree_configs=None, tree_stats=None, device_assigner=None, variables=None, tree_variables_class=TreeVariables, tree_graphs=None, training=True): self.params = params self.device_assigner = ( device_assigner or framework_variables.VariableDeviceChooser()) logging.info('Constructing forest with params = ') logging.info(self.params.__dict__) self.variables = variables or ForestVariables( self.params, device_assigner=self.device_assigner, training=training, tree_variables_class=tree_variables_class, tree_configs=tree_configs, tree_stats=tree_stats) tree_graph_class = tree_graphs or RandomTreeGraphs self.trees = [ tree_graph_class(self.variables[i], self.params, i) for i in range(self.params.num_trees) ] def _bag_features(self, tree_num, input_data): split_data = array_ops.split( value=input_data, num_or_size_splits=self.params.num_features, axis=1) return array_ops.concat( [split_data[ind] for ind in self.params.bagged_features[tree_num]], 1) def 
get_all_resource_handles(self): return ([self.variables[i].tree for i in range(len(self.trees))] + [self.variables[i].stats for i in range(len(self.trees))]) def training_graph(self, input_data, input_labels, num_trainers=1, trainer_id=0, **tree_kwargs): """Constructs a TF graph for training a random forest. Args: input_data: A tensor or dict of string->Tensor for input data. input_labels: A tensor or placeholder for labels associated with input_data. num_trainers: Number of parallel trainers to split trees among. trainer_id: Which trainer this instance is. **tree_kwargs: Keyword arguments passed to each tree's training_graph. Returns: The last op in the random forest training graph. Raises: NotImplementedError: If trying to use bagging with sparse features. """ processed_dense_features, processed_sparse_features, data_spec = ( data_ops.ParseDataTensorOrDict(input_data)) if input_labels is not None: labels = data_ops.ParseLabelTensorOrDict(input_labels) data_spec = data_spec or self.get_default_data_spec(input_data) tree_graphs = [] trees_per_trainer = self.params.num_trees / num_trainers tree_start = int(trainer_id * trees_per_trainer) tree_end = int((trainer_id + 1) * trees_per_trainer) for i in range(tree_start, tree_end): with ops.device(self.variables.device_dummies[i].device): seed = self.params.base_random_seed if seed != 0: seed += i # If using bagging, randomly select some of the input. tree_data = processed_dense_features tree_labels = labels if self.params.bagging_fraction < 1.0: # TODO(gilberth): Support bagging for sparse features. if processed_sparse_features is not None: raise NotImplementedError( 'Bagging not supported with sparse features.') # TODO(thomaswc): This does sampling without replacement. Consider # also allowing sampling with replacement as an option. 
batch_size = array_ops.strided_slice( array_ops.shape(processed_dense_features), [0], [1]) r = random_ops.random_uniform(batch_size, seed=seed) mask = math_ops.less( r, array_ops.ones_like(r) * self.params.bagging_fraction) gather_indices = array_ops.squeeze( array_ops.where(mask), axis=[1]) # TODO(thomaswc): Calculate out-of-bag data and labels, and store # them for use in calculating statistics later. tree_data = array_ops.gather(processed_dense_features, gather_indices) tree_labels = array_ops.gather(labels, gather_indices) if self.params.bagged_features: if processed_sparse_features is not None: raise NotImplementedError( 'Feature bagging not supported with sparse features.') tree_data = self._bag_features(i, tree_data) tree_graphs.append(self.trees[i].training_graph( tree_data, tree_labels, seed, data_spec=data_spec, sparse_features=processed_sparse_features, **tree_kwargs)) return control_flow_ops.group(*tree_graphs, name='train') def inference_graph(self, input_data, **inference_args): """Constructs a TF graph for evaluating a random forest. Args: input_data: A tensor or dict of string->Tensor for the input data. This input_data must generate the same spec as the input_data used in training_graph: the dict must have the same keys, for example, and all tensors must have the same size in their first dimension. **inference_args: Keyword arguments to pass through to each tree. Returns: A tuple of (probabilities, tree_paths, variance). Raises: NotImplementedError: If trying to use feature bagging with sparse features. 
""" processed_dense_features, processed_sparse_features, data_spec = ( data_ops.ParseDataTensorOrDict(input_data)) probabilities = [] paths = [] for i in range(self.params.num_trees): with ops.device(self.variables.device_dummies[i].device): tree_data = processed_dense_features if self.params.bagged_features: if processed_sparse_features is not None: raise NotImplementedError( 'Feature bagging not supported with sparse features.') tree_data = self._bag_features(i, tree_data) probs, path = self.trees[i].inference_graph( tree_data, data_spec, sparse_features=processed_sparse_features, **inference_args) probabilities.append(probs) paths.append(path) with ops.device(self.variables.device_dummies[0].device): # shape of all_predict should be [batch_size, num_trees, num_outputs] all_predict = array_ops.stack(probabilities, axis=1) average_values = math_ops.div( math_ops.reduce_sum(all_predict, 1), self.params.num_trees, name='probabilities') tree_paths = array_ops.stack(paths, axis=1) expected_squares = math_ops.div( math_ops.reduce_sum(all_predict * all_predict, 1), self.params.num_trees) regression_variance = math_ops.maximum( 0., expected_squares - average_values * average_values) return average_values, tree_paths, regression_variance def average_size(self): """Constructs a TF graph for evaluating the average size of a forest. Returns: The average number of nodes over the trees. """ sizes = [] for i in range(self.params.num_trees): with ops.device(self.variables.device_dummies[i].device): sizes.append(self.trees[i].size()) return math_ops.reduce_mean(math_ops.to_float(array_ops.stack(sizes))) # pylint: disable=unused-argument def training_loss(self, features, labels, name='training_loss'): return math_ops.negative(self.average_size(), name=name) # pylint: disable=unused-argument def validation_loss(self, features, labels): return math_ops.negative(self.average_size()) def average_impurity(self): """Constructs a TF graph for evaluating the leaf impurity of a forest. 
Returns: The last op in the graph. """ impurities = [] for i in range(self.params.num_trees): with ops.device(self.variables.device_dummies[i].device): impurities.append(self.trees[i].average_impurity()) return math_ops.reduce_mean(array_ops.stack(impurities)) def feature_importances(self): tree_counts = [self.trees[i].feature_usage_counts() for i in range(self.params.num_trees)] total_counts = math_ops.reduce_sum(array_ops.stack(tree_counts, 0), 0) return total_counts / math_ops.reduce_sum(total_counts) class RandomTreeGraphs(object): """Builds TF graphs for random tree training and inference.""" def __init__(self, variables, params, tree_num): self.variables = variables self.params = params self.tree_num = tree_num def training_graph(self, input_data, input_labels, random_seed, data_spec, sparse_features=None, input_weights=None): """Constructs a TF graph for training a random tree. Args: input_data: A tensor or placeholder for input data. input_labels: A tensor or placeholder for labels associated with input_data. random_seed: The random number generator seed to use for this tree. 0 means use the current time as the seed. data_spec: A data_ops.TensorForestDataSpec object specifying the original feature/columns of the data. sparse_features: A tf.SparseTensor for sparse input data. input_weights: A float tensor or placeholder holding per-input weights, or None if all inputs are to be weighted equally. Returns: The last op in the random tree training graph. """ # TODO(gilberth): Use this. 
unused_epoch = math_ops.to_int32(get_epoch_variable()) if input_weights is None: input_weights = [] sparse_indices = [] sparse_values = [] sparse_shape = [] if sparse_features is not None: sparse_indices = sparse_features.indices sparse_values = sparse_features.values sparse_shape = sparse_features.dense_shape if input_data is None: input_data = [] leaf_ids = model_ops.traverse_tree_v4( self.variables.tree, input_data, sparse_indices, sparse_values, sparse_shape, input_spec=data_spec.SerializeToString(), params=self.params.serialized_params_proto) update_model = model_ops.update_model_v4( self.variables.tree, leaf_ids, input_labels, input_weights, params=self.params.serialized_params_proto) finished_nodes = stats_ops.process_input_v4( self.variables.tree, self.variables.stats, input_data, sparse_indices, sparse_values, sparse_shape, input_labels, input_weights, leaf_ids, input_spec=data_spec.SerializeToString(), random_seed=random_seed, params=self.params.serialized_params_proto) with ops.control_dependencies([update_model]): return stats_ops.grow_tree_v4( self.variables.tree, self.variables.stats, finished_nodes, params=self.params.serialized_params_proto) def inference_graph(self, input_data, data_spec, sparse_features=None): """Constructs a TF graph for evaluating a random tree. Args: input_data: A tensor or placeholder for input data. data_spec: A TensorForestDataSpec proto specifying the original input columns. sparse_features: A tf.SparseTensor for sparse input data. Returns: A tuple of (probabilities, tree_paths). 
""" sparse_indices = [] sparse_values = [] sparse_shape = [] if sparse_features is not None: sparse_indices = sparse_features.indices sparse_values = sparse_features.values sparse_shape = sparse_features.dense_shape if input_data is None: input_data = [] return model_ops.tree_predictions_v4( self.variables.tree, input_data, sparse_indices, sparse_values, sparse_shape, input_spec=data_spec.SerializeToString(), params=self.params.serialized_params_proto) def size(self): """Constructs a TF graph for evaluating the current number of nodes. Returns: The current number of nodes in the tree. """ return model_ops.tree_size(self.variables.tree) def feature_usage_counts(self): return model_ops.feature_usage_counts( self.variables.tree, params=self.params.serialized_params_proto)
apache-2.0
ssorgatem/qiime
scripts/compare_alpha_diversity.py
15
12087
#!/usr/bin/env python # File created on 06 Jun 2011 from __future__ import division __author__ = "William Van Treuren" __copyright__ = "Copyright 2011, The QIIME project" __credits__ = ["William Van Treuren", "Greg Caparaso", "Jai Ram Rideout"] __license__ = "GPL" __version__ = "1.9.1-dev" __maintainer__ = "William Van Treuren" __email__ = "vantreur@colorado.edu" import os from os.path import join from qiime.util import (parse_command_line_parameters, make_option, create_dir) from qiime.compare_alpha_diversity import (compare_alpha_diversities, _correct_compare_alpha_results, test_types, correction_types, generate_alpha_diversity_boxplots) script_info = {} script_info[ 'brief_description'] = """This script compares alpha diversities based on a two-sample t-test using either parametric or non-parametric (Monte Carlo) methods.""" script_info['script_description'] = """ This script compares the alpha diversity of samples found in a collated alpha diversity file. The comparison is done not between samples, but between groups of samples. The groupings are created via the input category passed via -c/--category. Any samples which have the same value under the catgory will be grouped. For example, if your mapping file had a category called 'Treatment' that separated your samples into three groups (Treatment='Control', Treatment='Drug', Treatment='2xDose'), passing 'Treatment' to this script would cause it to compare (Control,Drug), (Control,2xDose), (2xDose, Drug) alpha diversity values. By default the two-sample t-test will be nonparametric (i.e. using Monte Carlo permutations to calculate the p-value), though the user has the option to make the test a parametric t-test. The script creates an output file in tab-separated format where each row is a different group comparison. The columns in each row denote which two groups of samples are being compared, as well as the mean and standard deviation of each group's alpha diversity. 
Finally, the t-statistic and p-value are reported for the comparison. This file can be most easily viewed in a spreadsheet program such as Excel. Note: Any iterations of a rarefaction at a given depth will be averaged. For instance, if your collated_alpha file had 10 iterations of the rarefaction at depth 480, the scores for the alpha diversity metrics of those 10 iterations would be averaged (within sample). The iterations are not controlled by this script; when multiple_rarefactions.py is called, the -n option specifies the number of iterations that have occurred. The multiple comparison correction takes into account the number of between group comparisons. If you do not know the rarefaction depth available or you want to use the deepest rarefaction level available then do not pass -d/--depth and it will default to using the deepest available. If t-statistics and/or p-values are None for any of your comparisons, there are three possible reasons. The first is that there were undefined values in your collated alpha diversity input file. This occurs if there were too few sequences in one or more of the samples in the groups involved in those comparisons to compute alpha diversity at that depth. You can either rerun %prog passing a lower value for --depth, or you can re-run alpha diversity after filtering samples with too few sequences. The second is that you had some comparison where each treatment was represented by only a single sample. It is not possible to perform a two-sample t-test on two samples each of length 1, so None will be reported instead. The third possibility occurs when using the nonparamteric t-test with small datasets where the Monte Carlo permutations don't return a p-value because the distribution of the data has no variance. The multiple comparisons correction will not penalize you for comparisons that return as None regardless of origin. 
If the means/standard deviations are None for any treatment group, the likely cause is that there is an \'n\\a\' value in the collated_alpha file that was passed. """ script_info['script_usage'] = [] script_info['script_usage'].append(("Comparing alpha diversities", "The following command takes the following input: a mapping file (which " "associaties each sample with a number of characteristics), alpha diversity " "metric (the results of collate_alpha for an alpha diverity metric, like " "PD_whole_tree), depth (the rarefaction depth to use for comparison), " "category (the category in the mapping file to determine which samples to " "compare to each other), and output filepath (a path to the output file to be created). A " "nonparametric two sample t-test is run to compare the alpha diversities " "using the default number of Monte Carlo permutations (999).", "%prog -i PD_whole_tree.txt -m mapping.txt -c Treatment -d 100 -o Treatment_PD100")) script_info['script_usage'].append(("Comparing alpha diversities", "Similar to above, but performs comparisons for two categories.", "%prog -i PD_whole_tree.txt -m mapping.txt -c Treatment,DOB -d 100 -o Treatment_DOB_PD100")) script_info['script_usage'].append(("Parametric t-test", "The following command runs a parametric two sample t-test using the " "t-distribution instead of Monte Carlo permutations at rarefaction depth 100.", "%prog -i PD_whole_tree.txt -m mapping.txt -c Treatment -d 100 -o " "PD_d100_parametric -t parametric")) script_info['script_usage'].append(("Parametric t-test", "The following command runs a parametric two sample t-test using the " "t-distribution instead of Monte Carlo permutations at the greatest depth available.", "%prog -i PD_whole_tree.txt -m mapping.txt -c Treatment -o " "PD_dmax_parametric -t parametric")) script_info['output_description'] = """ Generates a tsv stats file and pdf of boxplots for each input category. 
Each row in the tsv file corresponds to a comparison between two groups of treatment values, and includes the means and standard deviations of the two groups' alpha diversities, along with the results of the two-sample t-test. """ script_info[ 'script_usage_output_to_remove'] = [ '$PWD/PD_dmax_parametric.txt', '$PWD/PD_d100_parametric.txt', '$PWD/PD_d100.txt'] script_info['required_options'] = [ make_option('-i', '--alpha_diversity_fp', action='store', type='existing_filepath', help='path to collated alpha diversity file (as generated by ' 'collate_alpha.py) [REQUIRED]'), make_option('-m', '--mapping_fp', action='store', type='existing_filepath', help='path to the mapping file [REQUIRED]'), make_option('-c', '--categories', action='store', type='string', help='comma-separated list of categories for comparison [REQUIRED]'), make_option('-o', '--output_dir', action='store', type='new_dirpath', help='directory where output files should be stored [REQUIRED]')] script_info['optional_options'] = [ make_option('-t', '--test_type', type='choice', choices=test_types, help='the type of test to perform when calculating the p-values. Valid ' 'choices: ' + ', '.join(test_types) + '. If test_type is ' 'nonparametric, Monte Carlo permutations will be used to determine the ' 'p-value. If test_type is parametric, the num_permutations option will ' 'be ignored and the t-distribution will be used instead [default: ' '%default]', default='nonparametric'), make_option('-n', '--num_permutations', type='int', default=999, help='the number of permutations to perform when calculating the ' 'p-value. Must be greater than 10. Only applies if test_type is ' 'nonparametric [default: %default]'), make_option('-p', '--correction_method', type='choice', choices=correction_types, help='method to use for correcting multiple ' 'comparisons. Available methods are bonferroni, fdr, or none. 
' '[default: %default]', default='bonferroni'), make_option('-d', '--depth', type='int', default=None, help='depth of rarefaction file to use [default: greatest depth]')] script_info['version'] = __version__ def main(): option_parser, opts, args = parse_command_line_parameters(**script_info) mapping_fp = opts.mapping_fp alpha_diversity_fp = opts.alpha_diversity_fp categories = opts.categories.split(',') depth = opts.depth output_dir = opts.output_dir correction_method = opts.correction_method test_type = opts.test_type num_permutations = opts.num_permutations if num_permutations < 10: option_parser.error('Number of permuations must be greater than or ' 'equal to 10.') create_dir(output_dir) for category in categories: stat_output_fp = join(output_dir, '%s_stats.txt' % category) boxplot_output_fp = join(output_dir, '%s_boxplots.pdf' % category) alpha_diversity_f = open(alpha_diversity_fp, 'U') mapping_f = open(mapping_fp, 'U') ttest_result, alphadiv_avgs = \ compare_alpha_diversities(alpha_diversity_f, mapping_f, category, depth, test_type, num_permutations) alpha_diversity_f.close() mapping_f.close() corrected_result = _correct_compare_alpha_results(ttest_result, correction_method) # write stats results stat_output_f = open(stat_output_fp, 'w') header = ('Group1\tGroup2\tGroup1 mean\tGroup1 std\tGroup2 mean\t' 'Group2 std\tt stat\tp-value') lines = [header] for (t0, t1), v in corrected_result.items(): lines.append('\t'.join(map(str, [t0, t1, alphadiv_avgs[t0][0], alphadiv_avgs[t0][1], alphadiv_avgs[t1][0], alphadiv_avgs[t1][1], v[0], v[1]]))) stat_output_f.write('\n'.join(lines) + '\n') stat_output_f.close() # write box plots alpha_diversity_f = open(alpha_diversity_fp, 'U') mapping_f = open(mapping_fp, 'U') boxplot = generate_alpha_diversity_boxplots(alpha_diversity_f, mapping_f, category, depth) alpha_diversity_f.close() mapping_f.close() boxplot.savefig(boxplot_output_fp) if __name__ == "__main__": main()
gpl-2.0
pyfa-org/eos
eos/util/repr.py
1
1866
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================


def make_repr_str(instance, spec=None):
    """Prepare a string for printing info about the passed object.

    Args:
        instance: Object to gather info from.
        spec (optional): Iterable which defines the fields to include in
            the info string. Each element is either a plain attribute
            name, or a two-element tuple/list where the first element is
            the label used in the info string and the second element is
            the actual attribute name to read.

    Returns:
        String which contains the object's class name and the requested
        additional fields.
    """
    def split_field(field):
        # A plain string serves as both label and attribute name
        if isinstance(field, str):
            return field, field
        return field

    pairs = [split_field(field) for field in spec or ()]
    # Missing attributes are rendered as 'N/A' instead of raising
    args = ', '.join(
        '{}={}'.format(label, getattr(instance, attr_name, 'N/A'))
        for label, attr_name in pairs)
    return '<{}({})>'.format(type(instance).__name__, args)
lgpl-3.0
taoger/titanium_mobile
site_scons/apicoverage.py
30
13031
#!/usr/bin/env python # # Titanium API Coverage Generator # # Initial Author: Jeff Haynie, 3/30/09 # import glob, re, os.path as path import fnmatch, os, sys, types import traceback cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename)) sys.path.append(path.join(cwd, "..", "build")) sys.path.append(path.join(cwd, "..", "support", "common")) import simplejson as json import titanium_version baseVersion = titanium_version.version t = baseVersion.split(".") defaultVersion = "%s.%s" % (t[0],t[1]) default_mobile_platforms = ["android-1.5", "iphone-2.2.1", "iphone-3.0", "iphone-3.0", "iphone-3.1"] default_platforms = default_mobile_platforms class GlobDirectoryWalker: # a forward iterator that traverses a directory tree def __init__(self, directory, patterns=['*']): self.stack = [directory] self.patterns = patterns self.files = [] self.index = 0 def __getitem__(self, index): while 1: try: file = self.files[self.index] self.index = self.index + 1 except IndexError: # pop next directory from stack self.directory = self.stack.pop() self.files = os.listdir(self.directory) self.index = 0 else: # got a filename fullname = os.path.join(self.directory, file) if os.path.isdir(fullname) and not os.path.islink(fullname): self.stack.append(fullname) for pattern in self.patterns: if fnmatch.fnmatch(file, pattern): return fullname def convert_type(value): # trivial type conversions if type(value)!=str: return value if value == 'True' or value == 'true': return True elif value == 'False' or value == 'false': return False elif re.match('^[0-9]+$',value): return int(value) elif re.match('^[0-9\.]+$',value): return float(value) return value def parse_key_value_pairs(pairs, metadata): for kvpair in pairs.strip().split(','): key, value = kvpair.split('=') metadata[key.strip()] = value.strip() return metadata def get_property(h,name,default,convert=True): if not name in h: return default try: if convert: return convert_type(h[name]) else: return h[name] except: return 
default class Module(object): def __init__(self, name): self.name = name.strip() self.api_points = [] self.api_points_map = {} def add_api(self, api): print ">> adding api = %s" % api.name if api.name in self.api_points_map.keys(): raise Exception("Tried to add %s API twice!" % api.name) else: self.api_points.append(api) self.api_points_map[api.name] = api api.module = self def get_api_with_name(self, api_name): if not api_name in self.api_points_map.keys(): raise Exception("Tried to modify %s API before defining it!" % api_name) else: return self.api_points_map[api_name] @staticmethod def get_with_name(name): if not(name in Module.modules.keys()): Module.modules[name] = Module(name) return Module.modules[name] @staticmethod def all_as_dict(): d = {} for m in Module.modules.values(): c = {} for k in m.api_points_map: v=m.api_points_map[k] if k.find(".")!=-1: a,b = k.split('.',1) if c.has_key(a): obj = c[a] for kk in v: obj[kk] = v[kk] else: c[a]=v if c.has_key("description"): del c["description"] if c.has_key("deprecated"): del c["deprecated"] if not c.has_key("object"): c["object"]=True else: c[k]=v d[m.name] = c return d class API(dict): @staticmethod def create_with_full_name(fullName): module = None api = None if fullName.find(".") == -1: module = Module.get_with_name('<global>') api = API(fullName, module) else: module_name, api_name = fullName.strip().split('.', 1) module = Module.get_with_name(module_name) if api_name.find(".")!=-1: sub_module_name, sub_api_name = api_name.split('.',1) if module.api_points_map.has_key(api_name): sub_module_api = module.api_points_map[api_name] else: sub_module_api = API(sub_module_name,module) module.api_points_map[api_name]=sub_module_api api = API(sub_api_name) sub_module_api.add_object(api) print "adding to submodule %s -- %s" % (module.name, api.name) return api else: api = API(api_name, module) module.add_api(api) print "adding %s -- %s" % (api.module.name, api.name) return api @staticmethod def 
get_with_full_name(fullName): module_name, api_name = fullName.strip().split('.', 1) module = Module.get_with_name(module_name) api = module.get_api_with_name(api_name) return api def __init__(self, name, module=None): API.count += 1 self.name = self['name'] = name.strip() self.module = module self['deprecated'] = False self['since'] = defaultVersion self['description'] = '' def add_object(self,obj): self[obj.name]=obj self['object']=True def add_metadata(self, metadata, is_property = False): self.name = get_property(metadata, 'name', self.name) self['deprecated'] = get_property(metadata, 'deprecated', self['deprecated']) self['description'] = get_property(metadata, 'description', self['description']) self['since'] = get_property(metadata, 'since', self['since'], convert=False) if get_property(metadata, 'method', False): self['method'] = True self['returns'] = None self['arguments'] = [] if get_property(metadata, 'property', False) or is_property: self['property'] = True # default to property if metadata.has_key('platforms'): platforms = {} for osname in metadata['platforms'].split('|'): platforms[osname]=[] self['platforms'] = platforms else: self['platforms'] = default_platforms print self['platforms'] if self.has_key('method') == False and self.has_key('property') == False: raise Exception("invalid metadata for %s - missing either 'property' or 'method'" % self.name) def __str__(self): return 'API<%s>' % self.name def add_argument(self,arg): try: if not self.has_key('method'): self['method']=True self['arguments']=[] self['arguments'].append(arg) except Exception, e: raise Exception("Invalid type on add_argument: %s, Error was: %s, Object is: %s" % (self.name,e,self)) def set_return_type(self,return_type): self['returns'] = return_type def set_deprecated(self,msg,version): self.deprecated = True self['deprecated'] = msg self['deprecated_on'] = version class APIArgument(dict): def __init__(self, params, description): self['description'] = description self['name'] 
= params['name'] self.forname = params['for'] self['type'] = get_property(params,'type','object') self['optional'] = get_property(params,'optional',False) def __str__(self): return 'APIArgument<%s>' % self['name'] class APIReturnType(dict): def __init__(self, params, description): self.forname = params['for'] self['description'] = description self['type'] = get_property(params,'type','void') def __str__(self): return 'APIReturnType<%s>' % self['name'] def get_last_method_before(method_index, start): current_start = None method_starts = method_index.keys() method_starts.sort() for method_start in method_starts: if method_start > start: break else: current_start = method_start if current_start: return method_index[current_start] else: return None def generate_api_coverage(dirs,fs): API.count = 0 Module.modules = {} api_pattern = '@tiapi\(([^\)]*)\)(.*)' arg_pattern = '@tiarg\(([^\)]*)\)(.*)' res_pattern = '@tiresult\(([^\)]*)\)(.*)' dep_pattern = '@tideprecated\(([^\)]*)\)(.*)' context_sensitive_arg_pattern = '@tiarg\[([^\]]+)\](.*)' context_sensitive_result_pattern = '@tiresult\[([^\]]+)\](.*)' context_sensitive_api_description = '@tiapi (.*)' context_sensitive_arg_description = '@tiarg (.*)' context_sensitive_result_description = '@tiresult (.*)' tiproperty_pattern = '@tiproperty\[([^\]]+)\](.*)' files = set() files_with_matches = set() extensions = ['h','cc','c','cpp','m','mm','js','py','rb'] extensions = ['*.' 
+ x for x in extensions] for dirname in dirs: print dirname for i in GlobDirectoryWalker(dirname, extensions): files.add(i) for filename in files: current_api = None current_arg = None current_result = None match = None for line in open(filename,'r').read().splitlines(): try: m = re.search(api_pattern, line) if m is not None: files_with_matches.add(filename) metadata = parse_key_value_pairs(m.group(1).strip(), {}) metadata['description'] = m.group(2).strip() api = API.create_with_full_name(metadata['name']) api.add_metadata(metadata) current_api = api continue m = re.search(tiproperty_pattern, line) if m is not None: files_with_matches.add(filename) bits = m.group(1).split(',', 2) metadata = {} metadata['type'] = bits[0] metadata['description'] = m.group(2).strip() if len(bits) > 2: metadata = parse_key_value_pairs(bits[2], metadata) api = API.create_with_full_name(bits[1]) api.add_metadata(metadata,True) current_api = api continue m = re.search(context_sensitive_arg_pattern, line) if m is not None: files_with_matches.add(filename) if not current_api: continue bits = m.group(1).split(',', 2) metadata = {} metadata['for'] = current_api.name metadata['type'] = bits[0].strip() metadata['name'] = bits[1].strip() metadata['description'] = m.group(2).strip() if len(bits) > 2: metadata = parse_key_value_pairs(bits[2], metadata) current_arg = APIArgument(metadata, metadata['description']) current_api.add_argument(current_arg) continue m = re.search(context_sensitive_result_pattern, line) if m is not None: files_with_matches.add(filename) if not current_api: continue metadata = {} metadata['type'] = m.group(1).strip() metadata['description'] = m.group(2).strip() metadata['for'] = current_api.name current_result = APIReturnType(metadata, metadata['description']) current_api.set_return_type(current_result) continue m = re.search(context_sensitive_api_description, line) if m is not None: files_with_matches.add(filename) description = m.group(1) if current_api: description = 
current_api['description'] + ' ' + description.strip() current_api['description'] = description.strip() continue m = re.search(context_sensitive_arg_description, line) if m is not None: files_with_matches.add(filename) description = m.group(1) if current_arg: description = current_arg['description'] + ' ' + description.strip() current_arg['description'] = description.strip() continue m = re.search(context_sensitive_result_description, line) if m is not None: files_with_matches.add(filename) description = m.group(1) if current_result: description = current_result['description'] + ' ' + description.strip() current_result['description'] = description.strip() continue m = re.search(arg_pattern, line) if m is not None: files_with_matches.add(filename) description = m.group(2).strip() metadata = parse_key_value_pairs(m.group(1).strip(), {}) api = API.get_with_full_name(metadata['for']) api.add_argument(APIArgument(metadata, description)) continue m = re.search(res_pattern, line) if m is not None: files_with_matches.add(filename) description = m.group(2).strip() metadata = parse_key_value_pairs(m.group(1).strip(), {}) api = API.get_with_full_name(metadata['for']) api.set_return_type(APIReturnType(metadata, description)) continue m = re.search(dep_pattern, line) if m is not None: files_with_matches.add(filename) description = m.group(2).strip() metadata = parse_key_value_pairs(m.group(1).strip(), {}) api = API.get_with_full_name(metadata['for']) api.set_deprecated(description, metadata['version']) except Exception, e: print "Exception parsing API metadata in file: %s, Exception: %s" % (filename,e) print "Line was: %s" % line raise j = Module.all_as_dict() k = {} for ky in Module.modules.keys(): k[ky]=Module.modules[ky].api_points_map print json.dumps(k, sort_keys=True, indent=4) #global should just be top-level keys g = j["<global>"] for key in g: j[key.strip()]=g[key] del j["<global>"] fs.write(json.dumps(j, sort_keys=True, indent=4)) print "Found %i APIs for %i modules 
in %i files" % (API.count, len(Module.modules), len(files_with_matches)) if __name__ == '__main__': if len(sys.argv)<3: print "Usage: %s <dir> <outfile> [platform]" % os.path.basename(sys.argv[0]) sys.exit(1) if len(sys.argv)==4: if sys.argv[3]=='mobile': default_platforms = default_mobile_platforms f = open(os.path.expanduser(sys.argv[2]), 'w') dirs = [] dirs.append(os.path.abspath(os.path.expanduser(sys.argv[1]))) generate_api_coverage(dirs,f)
apache-2.0
Ophrys-Project/Ophrys
ophrys/utils/models.py
1
4817
from django.conf.urls import patterns, url, include
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db import models

from .views import ListView, CreateView, DetailView, UpdateView, DeleteView


class GetAbsoluteUrlMixin:
    """Mixin adding get_absolute_url() and get_absolute_url_name() to a model.

    Both methods look up an url name in a nested namespace. The top level
    namespace is the name of the application including the name of the
    project, e. g. `yourproject.yourapp`. The low level namespace is the
    name of the current model class, e. g. `YourModel`. The url name can
    be something like `list`, `create`, `detail`, `update` or `delete`,
    so this mixin tries to reverse e. g.
    `yourproject.yourapp:YourModel:detail`. All named urls except `list`
    and `create` have to accept either a pk argument or a slug argument.
    """

    def get_absolute_url(self, url_name='detail'):
        """Return the url for the given url name.

        Urls whose name is not `list` or `create` must accept a pk
        argument or a slug argument.
        """
        full_url_name = self.get_absolute_url_name(url_name)
        # `list` and `create` urls take no per-object argument.
        if url_name in ('list', 'create'):
            return reverse(full_url_name)
        try:
            return reverse(full_url_name, kwargs={'pk': str(self.pk)})
        except NoReverseMatch:
            pass
        # TODO: Raise an specific error message if self.slug does not exist
        # or reverse does not find an url.
        return reverse(full_url_name, kwargs={'slug': str(self.slug)})

    def get_absolute_url_name(self, url_name='detail'):
        """Return the full url name (with namespaces) for the given url name."""
        # The application namespace is the model's module path up to `.models`.
        app_namespace = type(self).__module__.split('.models')[0]
        model_namespace = type(self).__name__
        return '%s:%s:%s' % (app_namespace, model_namespace, url_name)


class AutoModelMixin(GetAbsoluteUrlMixin):
    """Mixin for models to add automatically designed urls and views.

    Add this mixin to your model and include YourModel().urls in the
    urlpatterns of your application::

        url(r'^example/', include(YourModel().urls))

    This sets up the urls and view classes for a list view (`/example/`),
    a create view (`/example/create/`), a detail view (`/example/<pk>/`),
    an update view (`/example/<pk>/update/`) and a delete view
    (`/example/<pk>/delete/`). You only have to write the corresponding
    templates using Django's default template names
    (`yourapp/yourmodel_list.html`, `yourapp/yourmodel_form.html`,
    `yourapp/yourmodel_detail.html`,
    `yourapp/yourmodel_confirm_delete.html`).

    The GetAbsoluteUrlMixin is used, so the urls have to be included into
    a specific top level namespace concerning the name of the application
    (including the name of the project)::

        url(r'^example_app/', include(yourproject.yourapp.urls),
            namespace='yourproject.yourapp')
    """

    @property
    def urls(self):
        """Attribute of mixed models.

        Include this in the urlpatterns of your application::

            url(r'^example/', include(YourModel().urls))
        """
        # (urlconf, app_namespace, instance_namespace) triple for include().
        return (self.get_urlpatterns(), None, type(self).__name__)

    def get_urlpatterns(self):
        """Return the urlpatterns object.

        Override this method to customize the urls.
        """
        return patterns(
            '',
            url(r'^$',
                self.get_view_class('List').as_view(), name='list'),
            url(r'^create/$',
                self.get_view_class('Create').as_view(), name='create'),
            url(r'^(?P<pk>\d+)/$',
                self.get_view_class('Detail').as_view(), name='detail'),
            url(r'^(?P<pk>\d+)/update/$',
                self.get_view_class('Update').as_view(), name='update'),
            url(r'^(?P<pk>\d+)/delete/$',
                self.get_view_class('Delete').as_view(), name='delete'))

    def get_view_class(self, view_name):
        """Construct and return the view class for the given view name.

        Override this method to customize the view classes.
        """
        base_classes = {
            'List': ListView,
            'Create': CreateView,
            'Detail': DetailView,
            'Update': UpdateView,
            'Delete': DeleteView}
        try:
            base_class = base_classes[view_name]
        except KeyError:
            raise ValueError('The view name "%s" is unknown.' % view_name)
        class_attrs = {'model': type(self)}
        # After a successful delete there is no detail page to go back to,
        # so the delete view redirects to the list view.
        if view_name == 'Delete':
            class_attrs['success_url_name'] = self.get_absolute_url_name('list')
        return type(view_name, (base_class,), class_attrs)
mit
halberom/ansible
lib/ansible/utils/module_docs_fragments/cloudengine.py
28
2959
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.


class ModuleDocFragment(object):
    """Documentation fragment shared by CloudEngine network modules.

    The DOCUMENTATION constant is YAML that Ansible splices into each
    module's own DOCUMENTATION block, so its line structure and
    indentation must stay valid YAML.
    """

    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  host:
    description:
      - Specifies the DNS host name or address for connecting to the remote
        device over the specified transport. The value of host is used as the
        destination address for the transport.
    required: true
  port:
    description:
      - Specifies the port to use when building the connection to the remote
        device. This value applies to either I(cli) or I(netconf). The port
        value will default to the appropriate transport common port if none
        is provided in the task. (cli=22, netconf=22).
    required: false
    default: 0 (use common port)
  username:
    description:
      - Configures the username to use to authenticate the connection to the
        remote device. This value is used to authenticate the CLI login. If
        the value is not specified in the task, the value of environment
        variable C(ANSIBLE_NET_USERNAME) will be used instead.
    required: false
  password:
    description:
      - Specifies the password to use to authenticate the connection to the
        remote device. This is a common argument used for cli transports. If
        the value is not specified in the task, the value of environment
        variable C(ANSIBLE_NET_PASSWORD) will be used instead.
    required: false
    default: null
  ssh_keyfile:
    description:
      - Specifies the SSH key to use to authenticate the connection to the
        remote device. This argument is used for the I(cli) transport. If the
        value is not specified in the task, the value of environment variable
        C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
    required: false
  transport:
    description:
      - Configures the transport connection to use when connecting to the
        remote device. The transport argument supports connectivity to the
        device over cli (ssh).
    required: true
    default: cli
  provider:
    description:
      - Convenience method that allows all I(cloudengine) arguments to be
        passed as a dict object. All constraints (required, choices, etc)
        must be met either by individual arguments or values in this dict.
    required: false
    default: null
"""
gpl-3.0
yasoob/PythonRSSReader
venv/lib/python2.7/dist-packages/gobject/propertyhelper.py
3
10311
# -*- Mode: Python; py-indent-offset: 4 -*- # pygobject - Python bindings for the GObject library # Copyright (C) 2007 Johan Dahlin # # gobject/propertyhelper.py: GObject property wrapper/helper # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 # USA import sys import gobject._gobject _gobject = sys.modules['gobject._gobject'] from gobject.constants import \ TYPE_NONE, TYPE_INTERFACE, TYPE_CHAR, TYPE_UCHAR, \ TYPE_BOOLEAN, TYPE_INT, TYPE_UINT, TYPE_LONG, \ TYPE_ULONG, TYPE_INT64, TYPE_UINT64, TYPE_ENUM, \ TYPE_FLAGS, TYPE_FLOAT, TYPE_DOUBLE, TYPE_STRING, \ TYPE_POINTER, TYPE_BOXED, TYPE_PARAM, TYPE_OBJECT, \ TYPE_PYOBJECT from gobject.constants import \ G_MINFLOAT, G_MAXFLOAT, G_MINDOUBLE, G_MAXDOUBLE, \ G_MININT, G_MAXINT, G_MAXUINT, G_MINLONG, G_MAXLONG, \ G_MAXULONG if sys.version_info >= (3, 0): _basestring = str _long = int else: _basestring = basestring _long = long class property(object): """ Creates a new property which in conjunction with GObject subclass will create a property proxy: >>> class MyObject(gobject.GObject): >>> ... prop = gobject.property(type=str) >>> obj = MyObject() >>> obj.prop = 'value' >>> obj.prop 'value' The API is similar to the builtin property: class AnotherObject(gobject.GObject): @gobject.property def prop(self): return ... 
Which will create a read-only property called prop. """ class __metaclass__(type): def __repr__(self): return "<class 'gobject.property'>" def __init__(self, getter=None, setter=None, type=None, default=None, nick='', blurb='', flags=_gobject.PARAM_READWRITE, minimum=None, maximum=None): """ @param getter: getter to get the value of the property @type getter: callable @param setter: setter to set the value of the property @type setter: callable @param type: type of property @type type: type @param default: default value @param nick: short description @type bick: string @param blurb: long description @type blurb: string @param flags: parameter flags, one of: - gobject.PARAM_READABLE - gobject.PARAM_READWRITE - gobject.PARAM_WRITABLE - gobject.PARAM_CONSTRUCT - gobject.PARAM_CONSTRUCT_ONLY - gobject.PARAM_LAX_VALIDATION @keyword minimum: minimum allowed value (int, float, long only) @keyword maximum: maximum allowed value (int, float, long only) """ if getter and not setter: setter = self._readonly_setter elif setter and not getter: getter = self._writeonly_getter elif not setter and not getter: getter = self._default_getter setter = self._default_setter self.getter = getter self.setter = setter if type is None: type = object self.type = self._type_from_python(type) self.default = self._get_default(default) self._check_default() if not isinstance(nick, _basestring): raise TypeError("nick must be a string") self.nick = nick if not isinstance(blurb, _basestring): raise TypeError("blurb must be a string") self.blurb = blurb if flags < 0 or flags > 32: raise TypeError("invalid flag value: %r" % (flags,)) self.flags = flags if minimum is not None: if minimum < self._get_minimum(): raise TypeError( "Minimum for type %s cannot be lower than %d" % ( self.type, self._get_minimum())) else: minimum = self._get_minimum() self.minimum = minimum if maximum is not None: if maximum > self._get_maximum(): raise TypeError( "Maximum for type %s cannot be higher than %d" % ( self.type, 
self._get_maximum())) else: maximum = self._get_maximum() self.maximum = maximum self.name = None self._exc = None def __repr__(self): return '<gobject property %s (%s)>' % ( self.name or '(uninitialized)', _gobject.type_name(self.type)) def __get__(self, instance, klass): if instance is None: return self self._exc = None value = instance.get_property(self.name) if self._exc: exc = self._exc self._exc = None raise exc return value def __set__(self, instance, value): if instance is None: raise TypeError self._exc = None instance.set_property(self.name, value) if self._exc: exc = self._exc self._exc = None raise exc def _type_from_python(self, type_): if type_ == _long: return TYPE_LONG elif type_ == int: return TYPE_INT elif type_ == bool: return TYPE_BOOLEAN elif type_ == float: return TYPE_DOUBLE elif type_ == str: return TYPE_STRING elif type_ == object: return TYPE_PYOBJECT elif (isinstance(type_, type) and issubclass(type_, (_gobject.GObject, _gobject.GEnum))): return type_.__gtype__ elif type_ in [TYPE_NONE, TYPE_INTERFACE, TYPE_CHAR, TYPE_UCHAR, TYPE_INT, TYPE_UINT, TYPE_BOOLEAN, TYPE_LONG, TYPE_ULONG, TYPE_INT64, TYPE_UINT64, TYPE_FLOAT, TYPE_DOUBLE, TYPE_POINTER, TYPE_BOXED, TYPE_PARAM, TYPE_OBJECT, TYPE_STRING, TYPE_PYOBJECT]: return type_ else: raise TypeError("Unsupported type: %r" % (type_,)) def _get_default(self, default): ptype = self.type if default is not None: return default if ptype in [TYPE_INT, TYPE_UINT, TYPE_LONG, TYPE_ULONG, TYPE_INT64, TYPE_UINT64]: return 0 elif ptype == TYPE_STRING: return '' elif ptype == TYPE_FLOAT or ptype == TYPE_DOUBLE: return 0.0 else: return None def _check_default(self): ptype = self.type default = self.default if (ptype == TYPE_BOOLEAN and (default not in (True, False))): raise TypeError( "default must be True or False, not %r" % (default,)) elif ptype == TYPE_PYOBJECT: if default is not None: raise TypeError("object types does not have default values") elif gobject.type_is_a(ptype, TYPE_ENUM): if default is 
None: raise TypeError("enum properties needs a default value") elif not gobject.type_is_a(default, ptype): raise TypeError("enum value %s must be an instance of %r" % (default, ptype)) def _get_minimum(self): ptype = self.type if ptype in [TYPE_UINT, TYPE_ULONG, TYPE_UINT64]: return 0 # Remember that G_MINFLOAT and G_MINDOUBLE are something different. elif ptype == TYPE_FLOAT: return -G_MAXFLOAT elif ptype == TYPE_DOUBLE: return -G_MAXDOUBLE elif ptype == TYPE_INT: return G_MININT elif ptype == TYPE_LONG: return G_MINLONG elif ptype == TYPE_INT64: return -2 ** 62 - 1 return None def _get_maximum(self): ptype = self.type if ptype == TYPE_UINT: return G_MAXUINT elif ptype == TYPE_ULONG: return G_MAXULONG elif ptype == TYPE_INT64: return 2 ** 62 - 1 elif ptype == TYPE_UINT64: return 2 ** 63 - 1 elif ptype == TYPE_FLOAT: return G_MAXFLOAT elif ptype == TYPE_DOUBLE: return G_MAXDOUBLE elif ptype == TYPE_INT: return G_MAXINT elif ptype == TYPE_LONG: return G_MAXLONG return None # # Getter and Setter # def _default_setter(self, instance, value): setattr(instance, '_property_helper_'+self.name, value) def _default_getter(self, instance): return getattr(instance, '_property_helper_'+self.name, self.default) def _readonly_setter(self, instance, value): self._exc = TypeError("%s property of %s is read-only" % ( self.name, type(instance).__name__)) def _writeonly_getter(self, instance): self._exc = TypeError("%s property of %s is write-only" % ( self.name, type(instance).__name__)) # # Public API # def get_pspec_args(self): ptype = self.type if ptype in [TYPE_INT, TYPE_UINT, TYPE_LONG, TYPE_ULONG, TYPE_INT64, TYPE_UINT64, TYPE_FLOAT, TYPE_DOUBLE]: args = self._get_minimum(), self._get_maximum(), self.default elif (ptype == TYPE_STRING or ptype == TYPE_BOOLEAN or ptype.is_a(TYPE_ENUM)): args = (self.default,) elif ptype == TYPE_PYOBJECT: args = () elif ptype.is_a(TYPE_OBJECT): args = () else: raise NotImplementedError(ptype) return (self.type, self.nick, self.blurb) + args + 
(self.flags,)
mit
zekearneodo/ephys-tools
zk/pca_filter.py
1
4586
__author__ = 'chris' import logging import tables from scipy import signal import numpy as np def PCA_filter(self, rec_h5_obj, probe): """ Filtering based on doi:10.1016/S0165-0270(01)00516-7 """ data = self.run_group.data D_all_clean = rec_h5_obj.create_carray(self.run_group, '_data_PCA_filt', shape=data.shape, atom=data.atom) logging.info('Starting PCA filtering. Loading data matrix...') D_all_raw = data.read() # TODO: this read should be done by shank instead of entirely at once to save memory. # -- filter and threshold parameters -- t_scalar = 5. # stdeviations away from noise to call a spike. pre_spike_samples=10 # num samples before threshold-crossing to replace with spike-free version post_spike_samples= 10 # num samples after ^^^ rate = data._v_attrs['sampling_rate_Hz'] low = 500. high = 9895.675 _b, _a = signal.butter(3, (low/(rate/2.), high/(rate/2.)), 'pass') sh_cnt = 0 # --- first stage cleaning. for shank in probe.values(): sh_cnt += 1 logging.info('PCA filtering {0}'.format(sh_cnt)) channels = shank['channels'] # print channels D = D_all_raw[:, channels] D_clean = np.zeros(D.shape, dtype=D.dtype) for i in xrange(len(channels)): D_i = D[:,i] D_i_clean = D[:, i].astype(np.float64) noti = [] for ii in xrange(len(channels)): if ii != i: noti.append(ii) D_noti = D[:, noti] u, _, _ = np.linalg.svd(D_noti, full_matrices=False) for i_pc in xrange(3): # first 3 pcs pc = u[:, i_pc] b = np.dot(D_i, pc) / np.dot(pc, pc) pc *= b D_i_clean -= pc D_clean[:, i] = D_i_clean.astype(D.dtype) # --- find spikes, replace spike times with D_noise (spike free noise representation D_noise) D_noise = D - D_clean D_filt_clean_1 = signal.filtfilt(_b,_a, D_clean, axis=0) #filtered representation of cleaned data to find spikes D_nospikes = D.copy() for i in xrange(len(channels)): sig = D_filt_clean_1[:,i] median = np.median(np.abs(sig)) std = median / .6745 threshold = t_scalar * std sig_L = sig < -threshold edges = np.convolve([1, -1], sig_L, mode='same') t_crossings = 
np.where(edges == 1)[0] for cross in t_crossings: if cross == 0: continue elif cross < pre_spike_samples: st = 0 end = cross+post_spike_samples elif cross + post_spike_samples > len(sig) - 1: st = cross-pre_spike_samples end = len(sig)-1 else: st = cross-pre_spike_samples end = cross+post_spike_samples D_nospikes[st:end, i] = D_noise[st:end,i] # -- 2nd stage cleaning. for i in xrange(len(channels)): # just reuse D_clean's memory space here, as it is not being used by the algorithm any more. D_i_clean = D[:, i].astype(np.float64, copy=True) # Copying from original data matrix. D_i_nospikes = D_nospikes[:, i] noti = [] for ii in xrange(len(channels)): if ii != i: noti.append(ii) D_noti = D_nospikes[:, noti] u, _, _ = np.linalg.svd(D_noti, full_matrices=False) for i_pc in xrange(3): # first 3 pcs pc = u[:, i_pc] b = np.dot(D_i_nospikes, pc) / np.dot(pc, pc) pc *= b D_i_clean -= pc D_clean[:, i] = D_i_clean.astype(D.dtype) # put everything back into the a super D. for i, ch in enumerate(channels): # the channel order is the same as the row in D. D_all_clean[:, ch] = D_clean[:, i] D_all_clean.flush() assert isinstance(data, tables.EArray) logging.info('Renaming plfiltered data to "neural_PL_filtered"') data.rename('neural_PL_filtered') rec_h5_obj.flush() logging.info('Renaming PCA filtered data to "data"') D_all_clean.rename('data') rec_h5_obj.flush() logging.info('PCA filtering complete!') return D_all_clean
gpl-2.0
mfalesni/python-kwargify
test_kwargify.py
1
5569
# -*- coding: utf-8 -*- import pytest from kwargify import kwargify class TestFunctionWithNoArgs(object): @pytest.fixture(scope="class") def function(self): @kwargify def f(): return True return f def test_no_args_given(self, function): function() @pytest.mark.xfail @pytest.mark.parametrize("n", range(1, 4)) def test_args_given(self, function, n): function(*range(n + 1)) def test_kwargs_passed(self, function): function(foo="bar") class TestFunctionWithOnlyArgs(object): @pytest.fixture(scope="class") def function(self): @kwargify def f(a, b): return True return f @pytest.mark.xfail def test_no_args_given(self, function): function() @pytest.mark.xfail def test_args_given_not_enough(self, function): function(1) def test_args_given_enough(self, function): function(1, 2) @pytest.mark.xfail def test_only_kwargs_passed_wrong(self, function): function(foo="bar") @pytest.mark.xfail def test_only_kwargs_passed_not_enough(self, function): function(a="bar") def test_only_kwargs_passed(self, function): function(a=1, b=2) def test_both_passed(self, function): function(1, b=2) class TestFunctionWithDefaultValues(object): @pytest.fixture(scope="class") def function(self): @kwargify def f(a, b=None): return locals() return f def test_pass_only_required(self, function): assert function(1)["b"] is None def test_override_default_with_arg(self, function): assert function(1, 2)["b"] == 2 def test_override_default_with_kwarg(self, function): assert function(1, b=2)["b"] == 2 class TestKwargifyMethod(object): class _TestClass(object): def noargs(self): return locals() def onlyargs(self, a, b): return locals() def withdefault(self, a, b=None): return locals() @pytest.fixture(scope="class") def o(self): return self._TestClass() # No args test def test_no_args_given(self, o): kwargify(o.noargs)() @pytest.mark.xfail @pytest.mark.parametrize("n", range(1, 4)) def test_args_given(self, o, n): kwargify(o.noargs)(*range(n + 1)) def test_kwargs_passed(self, o): kwargify(o.noargs)(foo="bar") # Only 
args @pytest.mark.xfail def test_no_args_given_fails(self, o): kwargify(o.onlyargs)() @pytest.mark.xfail def test_args_given_not_enough(self, o): kwargify(o.onlyargs)(1) def test_args_given_enough(self, o): kwargify(o.onlyargs)(1, 2) @pytest.mark.xfail def test_only_kwargs_passed_wrong(self, o): kwargify(o.onlyargs)(foo="bar") @pytest.mark.xfail def test_only_kwargs_passed_not_enough(self, o): kwargify(o.onlyargs)(a="bar") def test_only_kwargs_passed(self, o): kwargify(o.onlyargs)(a=1, b=2) def test_both_passed(self, o): kwargify(o.onlyargs)(1, b=2) # Default values def test_pass_only_required(self, o): assert kwargify(o.withdefault)(1)["b"] is None def test_override_default_with_arg(self, o): assert kwargify(o.withdefault)(1, 2)["b"] == 2 def test_override_default_with_kwarg(self, o): assert kwargify(o.withdefault)(1, b=2)["b"] == 2 def test_wrapped_method(): # method wrapping should work the same as function wrapping, # so this only does a minimum of sanity checks class Foo(object): @kwargify def bar(self, x, y, z): return x, y, z f = Foo() args = 1, 2, 3 # method fails correctly with incorrect args, just like a function does with pytest.raises(TypeError): f.bar(**dict(zip(('x', 'y'), args))) # This should not explode (self is handled correctly) ret = f.bar(**dict(zip(('x', 'y', 'z'), args))) # Values should be returned in the same way that they were given assert ret == args def test_wrapped(): # double check that the function wrapper does its job def f(): """doctring!""" pass f.custom_attr = True wrapped_f = kwargify(f) # __wrapped__ should be set assert wrapped_f.__wrapped__ is f # dunder attrs should be copied over assert wrapped_f.__doc__ == f.__doc__ # any public attrs on the wrapped func should be available assert wrapped_f.custom_attr def test_wrap_method(): """Tst whether wrapping already existing method works.""" class A(object): def a(self): return True def b(self, a, b): return locals() def c(self, a, b=None): return locals() a = A() k_a = 
kwargify(a.a) k_b = kwargify(a.b) k_c = kwargify(a.c) # Plain function assert k_a() # Without nonrequired parameters with pytest.raises(TypeError): k_b() result = k_b(1, 2) assert result["a"] == 1 assert result["b"] == 2 # With nonrequired params with pytest.raises(TypeError): k_c() result_1 = k_c(1, 2) result_2 = k_c(1) assert result_1["a"] == result_2["a"] == 1 assert result_1["b"] == 2 assert result_2["b"] is None def test_wrap_class_constructor(): class A(object): def __init__(self, a, b=None): self.a = a self.b = b cons = kwargify(A) a = cons(a=1) assert a.a == 1 assert a.b is None
lgpl-3.0
markbrough/exchangerates
exchangerates/util.py
1
1344
fred_countries_currencies = { 'Australia': 'AUD', 'Austria': 'ATS', 'Belgium': 'BEF', 'Brazil': 'BRL', 'Canada': 'CAD', 'China': 'CNY', 'Denmark': 'DKK', 'Euro': 'EUR', 'Finland': 'FIM', 'France': 'FRF', 'Germany': 'DEM', 'Greece': 'GRD', 'Hong Kong': 'HKD', 'Ireland': 'IEP', 'Italy': 'ITL', 'India': 'INR', 'Japan': 'JPY', 'Malaysia': 'MYR', 'Mexico': 'MXN', 'Norway': 'NOK', 'Netherlands': 'NLG', 'Portugal': 'PTE', 'Singapore': 'SGD', 'South Korea': 'KRW', 'South Africa': 'ZAR', 'Spain': 'ESP', 'Sri Lanka': 'LKR', 'Sweden': 'SEK', 'Switzerland': 'CHF', 'Taiwan': 'TWD', 'Thailand': 'THB', 'Venezuela': 'VEF', 'New Zealand': 'NZD', 'U.K.': 'GBP' } oecd_countries_currencies = { 'AUS': 'AUD', 'BRA': 'BRL', 'CAN': 'CAD', 'CHE': 'CHF', 'CHL': 'CLP', 'CHN': "CNY", 'COL': 'COP', 'CRI': 'CRC', 'CZE': 'CZK', 'DNK': 'DKK', 'EA19': 'EUR', 'GBR': 'GBP', 'HUN': 'HUF', 'IDN': 'IDR', 'IND': 'INR', 'ISL': 'ISK', 'ISR': 'ILS', 'JPN': 'JPY', 'KOR': 'KRW', 'LVA': 'LVL', 'MEX': 'MXN', 'NOR': 'NOK', 'NZL': 'NZD', 'POL': 'PLN', 'RUS': 'RUB', 'SDR': 'XDR', 'SWE': 'SEK', 'TUR': 'TRY', 'ZAF': 'ZAR' }
mit
bmazin/SDR
Projects/ChannelizerSim/legacy/bin_width_1st_stage.py
1
1524
import matplotlib.pyplot as plt import scipy.signal import numpy as np import math import random from matplotlib.backends.backend_pdf import PdfPages samples = 51200 L = samples/512 fs = 512e6 dt = 1/fs time = [i*dt for i in range(samples)] def pfb_fir(x): N = len(x) T = 4 L = 512 bin_width_scale = 2.5 dx = T*math.pi/L/T X = np.array([n*dx-T*math.pi/2 for n in range(T*L)]) coeff = np.sinc(bin_width_scale*X/math.pi)*np.hanning(T*L) y = np.array([0+0j]*(N-T*L)) for n in range((T-1)*L, N): m = n%L coeff_sub = coeff[L*T-m::-L] y[n-T*L] = (x[n-(T-1)*L:n+L:L]*coeff_sub).sum() return y R = 100/5 #freqs = [i*1e5 + 6.0e6 for i in range(R)] freqs = [i*5e4 + 6.0e6 for i in range(R*8)] bin = [] bin_pfb = [] for f in freqs: print f signal = np.array([complex(math.cos(2*math.pi*f*t), math.sin(2*math.pi*f*t)) for t in time]) y = pfb_fir(signal) bin_pfb.append(np.fft.fft(y[0:512])[10]) bin = np.array(bin) bin_pfb = np.array(bin_pfb) freqs = np.array(freqs)/1e6 b = scipy.signal.firwin(20, cutoff=0.125, window="hanning") w,h = scipy.signal.freqz(b,1, 4*R, whole=1) h = np.array(h[2*R:4*R].tolist()+h[0:2*R].tolist()) #h = np.array(h[20:40].tolist()+h[0:20].tolist()) fig = plt.figure() ax0 = fig.add_subplot(111) #ax0.plot(freqs, abs(fir9), '.', freqs, abs(fir10), '.', freqs, abs(fir11), '.') ax0.plot(freqs, 10*np.log10(abs(bin_pfb)/512), 'k-') ax0.set_xlabel('Frequency (MHz)') ax0.set_ylabel('Gain (dB)') ax0.set_ylim((-50,0)) plt.show() #ax0.axvline(x = 10, linewidth=1, color='k')
gpl-2.0
Sweetgrassbuffalo/ReactionSweeGrass-v2
.meteor/local/dev_bundle/python/Lib/os2emxpath.py
24
4635
# Module 'os2emxpath' -- common operations on OS/2 pathnames """Common pathname manipulations, OS/2 EMX version. Instead of importing this module directly, import os and refer to this module as os.path. """ import os import stat from genericpath import * from genericpath import _unicode from ntpath import (expanduser, expandvars, isabs, islink, splitdrive, splitext, split, walk) __all__ = ["normcase","isabs","join","splitdrive","split","splitext", "basename","dirname","commonprefix","getsize","getmtime", "getatime","getctime", "islink","exists","lexists","isdir","isfile", "ismount","walk","expanduser","expandvars","normpath","abspath", "splitunc","curdir","pardir","sep","pathsep","defpath","altsep", "extsep","devnull","realpath","supports_unicode_filenames"] # strings representing various path-related bits and pieces curdir = '.' pardir = '..' extsep = '.' sep = '/' altsep = '\\' pathsep = ';' defpath = '.;C:\\bin' devnull = 'nul' # Normalize the case of a pathname and map slashes to backslashes. # Other normalizations (such as optimizing '../' away) are not done # (this is done by normpath). def normcase(s): """Normalize case of pathname. Makes all characters lowercase and all altseps into seps.""" return s.replace('\\', '/').lower() # Join two (or more) paths. def join(a, *p): """Join two or more pathname components, inserting sep as needed""" path = a for b in p: if isabs(b): path = b elif path == '' or path[-1:] in '/\\:': path = path + b else: path = path + '/' + b return path # Parse UNC paths def splitunc(p): """Split a pathname into UNC mount point and relative path specifiers. Return a 2-tuple (unc, rest); either part may be empty. If unc is not empty, it has the form '//host/mount' (or similar using backslashes). unc+rest is always the input path. Paths containing drive letters never have a UNC part. 
""" if p[1:2] == ':': return '', p # Drive letter present firstTwo = p[0:2] if firstTwo == '/' * 2 or firstTwo == '\\' * 2: # is a UNC path: # vvvvvvvvvvvvvvvvvvvv equivalent to drive letter # \\machine\mountpoint\directories... # directory ^^^^^^^^^^^^^^^ normp = normcase(p) index = normp.find('/', 2) if index == -1: ##raise RuntimeError, 'illegal UNC path: "' + p + '"' return ("", p) index = normp.find('/', index + 1) if index == -1: index = len(p) return p[:index], p[index:] return '', p # Return the tail (basename) part of a path. def basename(p): """Returns the final component of a pathname""" return split(p)[1] # Return the head (dirname) part of a path. def dirname(p): """Returns the directory component of a pathname""" return split(p)[0] # alias exists to lexists lexists = exists # Is a path a directory? # Is a path a mount point? Either a root (with or without drive letter) # or a UNC path with at most a / or \ after the mount point. def ismount(path): """Test whether a path is a mount point (defined as root of drive)""" unc, rest = splitunc(path) if unc: return rest in ("", "/", "\\") p = splitdrive(path)[1] return len(p) == 1 and p[0] in '/\\' # Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B. def normpath(path): """Normalize path, eliminating double slashes, etc.""" path = path.replace('\\', '/') prefix, path = splitdrive(path) while path[:1] == '/': prefix = prefix + '/' path = path[1:] comps = path.split('/') i = 0 while i < len(comps): if comps[i] == '.': del comps[i] elif comps[i] == '..' and i > 0 and comps[i-1] not in ('', '..'): del comps[i-1:i+1] i = i - 1 elif comps[i] == '' and i > 0 and comps[i-1] != '': del comps[i] else: i = i + 1 # If the path is now empty, substitute '.' if not prefix and not comps: comps.append('.') return prefix + '/'.join(comps) # Return an absolute path. 
def abspath(path): """Return the absolute version of a path""" if not isabs(path): if isinstance(path, _unicode): cwd = os.getcwdu() else: cwd = os.getcwd() path = join(cwd, path) return normpath(path) # realpath is a no-op on systems without islink support realpath = abspath supports_unicode_filenames = False
gpl-3.0
maxalbert/bokeh
bokeh/sphinxext/bokeh_palette.py
46
2209
""" Generate color representations of all known Bokeh palettes. Usage ----- This directive takes no arguments. """ from __future__ import absolute_import from docutils import nodes import jinja2 from sphinx.locale import _ from sphinx.util.compat import Directive from bokeh.palettes import brewer BREWER_TEMPLATE = jinja2.Template(u""" <table> <tr> <th> {{ name }} </th> </tr> {% for number in numbers %} <tr> <td height='20px' width='30px'> {{ number }} </td> {% for color in palette[number] %} <td height="20px" width="20px" style="background-color: {{ color }};"/> {% endfor %} </tr> {% endfor %} </table> """) class bokeh_palette(nodes.General, nodes.Element): pass class BokehPaletteDirective(Directive): has_content = False required_arguments = 1 def run(self): node = bokeh_palette() node['module'] = self.arguments[0] return [node] def html_visit_bokeh_palette(self, node): # NOTE: currently only handles the existing Brewer palettes names = sorted(brewer) for name in names: palette = brewer[name] numbers = sorted(palette) html = BREWER_TEMPLATE.render(name=name, numbers=numbers, palette=palette) self.body.append(html) raise nodes.SkipNode def latex_visit_bokeh_palette(self, node): self.body.append(_('[palette: %s]' % node['module'])) raise nodes.SkipNode def texinfo_visit_bokeh_palette(self, node): self.body.append(_('[palette: %s]' % node['module'])) raise nodes.SkipNode def text_visit_bokeh_palette(self, node): self.body.append(_('[palette: %s]' % node['module'])) raise nodes.SkipNode def man_visit_bokeh_palette(self, node): self.body.append(_('[palette: %s]' % node['module'])) raise nodes.SkipNode def setup(app): app.add_node(bokeh_palette, html=(html_visit_bokeh_palette, None), latex=(latex_visit_bokeh_palette, None), texinfo=(texinfo_visit_bokeh_palette, None), text=(text_visit_bokeh_palette, None), man=(man_visit_bokeh_palette, None)) app.add_directive('bokeh-palette', BokehPaletteDirective)
bsd-3-clause
kingctan/Misago
misago/threads/tests/test_threadslist_view.py
8
6188
from django.test import TestCase from misago.threads.moderation import ModerationError from misago.threads.views.generic.threads import Actions, Sorting from misago.users.testutils import AuthenticatedUserTestCase class MockRequest(object): def __init__(self, user, method='GET', POST=None): self.POST = POST or {} self.user = user self.session = {} self.path = '/cool-threads/' class MockActions(Actions): def get_available_actions(self, kwargs): return [] def action_test(self): pass class ActionsTests(AuthenticatedUserTestCase): def test_resolve_valid_action(self): """resolve_action returns valid action""" actions = MockActions(user=self.user) actions.available_actions = [{ 'action': 'test', 'name': "Test action" }] resolution = actions.resolve_action(MockRequest( user=self.user, POST={'action': 'test'}, )) self.assertEqual(resolution[0], actions.action_test) self.assertIsNone(resolution[1]) def test_resolve_arg_action(self): """resolve_action returns valid action and argument""" actions = MockActions(user=self.user) actions.available_actions = [{ 'action': 'test:1234', 'name': "Test action" }] resolution = actions.resolve_action(MockRequest( user=self.user, POST={'action': 'test:1234'}, )) self.assertEqual(resolution[0], actions.action_test) self.assertEqual(resolution[1], '1234') def test_resolve_invalid_action(self): """resolve_action handles invalid actions gracefully""" actions = MockActions(user=self.user) actions.available_actions = [{ 'action': 'test', 'name': "Test action" }] with self.assertRaises(ModerationError): resolution = actions.resolve_action(MockRequest( user=self.user, POST={'action': 'test:1234'}, )) with self.assertRaises(ModerationError): resolution = actions.resolve_action(MockRequest( user=self.user, POST={'action': 'test:1234'}, )) actions.available_actions = [{ 'action': 'test:123', 'name': "Test action" }] with self.assertRaises(ModerationError): resolution = actions.resolve_action(MockRequest( user=self.user, POST={'action': 'test'}, )) 
with self.assertRaises(ModerationError): resolution = actions.resolve_action(MockRequest( user=self.user, POST={'action': 'test:'}, )) with self.assertRaises(ModerationError): resolution = actions.resolve_action(MockRequest( user=self.user, POST={'action': 'test:321'}, )) def test_clean_selection(self): """clean_selection clears valid input""" actions = MockActions(user=self.user) self.assertEqual(actions.clean_selection(['1', '-', '9']), [1, 9]) def test_clean_invalid_selection(self): """clean_selection raises exception for invalid/empty input""" actions = MockActions(user=self.user) with self.assertRaises(ModerationError): actions.clean_selection([]) with self.assertRaises(ModerationError): actions.clean_selection(['abc']) def get_list(self): """get_list returns list of available actions""" actions = MockActions(user=self.user) actions.available_actions = [{ 'action': 'test:123', 'name': "Test action" }] self.assertEqual(actions.get_list(), actions.available_actions) def get_selected_ids(self): """get_selected_ids returns list of selected items""" actions = MockActions(user=self.user) actions.selected_ids = [1, 2, 4, 5, 6] self.assertEqual(actions.get_selected_ids(), actions.selected_ids) class SortingTests(TestCase): def setUp(self): self.sorting = Sorting('misago:forum', { 'forum_slug': "test-forum", 'forum_id': 42, }) def test_clean_kwargs_removes_default_sorting(self): """clean_kwargs removes default sorting""" default_sorting = self.sorting.sortings[0]['method'] cleaned_kwargs = self.sorting.clean_kwargs({'sort': default_sorting}) cleaned_kwargs['pie'] = 'yum-yum' self.assertEqual(cleaned_kwargs, {'pie': 'yum-yum'}) def test_clean_kwargs_removes_invalid_sorting(self): """clean_kwargs removes invalid sorting""" default_sorting = self.sorting.sortings[0]['method'] cleaned_kwargs = self.sorting.clean_kwargs({'sort': 'bad-sort'}) cleaned_kwargs['pie'] = 'yum-yum' self.assertEqual(cleaned_kwargs, {'pie': 'yum-yum'}) def 
test_clean_kwargs_preserves_valid_sorting(self): """clean_kwargs preserves valid sorting""" default_sorting = self.sorting.sortings[0]['method'] cleaned_kwargs = self.sorting.clean_kwargs({'sort': 'oldest'}) cleaned_kwargs['pie'] = 'yum-yum' self.assertEqual(cleaned_kwargs, {'sort': 'oldest', 'pie': 'yum-yum'}) def test_set_sorting_sets_valid_method(self): """set_sorting sets valid sorting""" for sorting in self.sorting.sortings: self.sorting.set_sorting(sorting['method']) self.assertEqual(sorting, self.sorting.sorting) self.assertEqual(sorting['name'], self.sorting.name) def test_choices(self): """choices returns set of valid choices""" for sorting in self.sorting.sortings: self.sorting.set_sorting(sorting['method']) choices = [choice['name'] for choice in self.sorting.choices()] self.assertNotIn(sorting['name'], choices) for other_sorting in self.sorting.sortings: if other_sorting != sorting: self.assertIn(other_sorting['name'], choices)
gpl-2.0
doctorzeb8/django-era
era/tests/test_functools.py
1
1525
from ..utils.functools import unidec, pluck, separate, pick, omit, truthful, avg from .base import SimpleTestCase, IsOkTestCase class UnidecTestCase(IsOkTestCase): def test_default_behaviour(self): self.assertOk( unidec(lambda fn, a: fn(a) + 'k') \ (lambda a: a[1:])('oo')) def test_with_params(self): self.assertOk( unidec(lambda fn, a, **f: f['w'] + fn(a))(w='o') \ (lambda a: a[1:])('kk')) class AvgTestCase(SimpleTestCase): def test(self): self.assertEqual(avg(4, 6, 8), 6) class SeparateTestCase(SimpleTestCase): def test(self): [even, odd] = separate(lambda x: bool(x % 2), [1, 2, 3, 4, 5]) self.assertEqual(even, [2, 4]) self.assertEqual(odd, [1, 3, 5]) class DictCopyTestCase(SimpleTestCase): def test_pick(self): self.assertEqual( pick({0: 1, 2: 4}, 0), {0: 1}) def test_omit(self): self.assertEqual( omit({0: 1, 2: 4}, 0), {2: 4}) def test_truthful(self): self.assertEqual( truthful({1: True, 2: False, 3: 'yes', 4: []}), {1: True, 3: 'yes'}) class PluckTestCase(SimpleTestCase): def test_dict(self): self.assertEqual( pluck([{0: 0}, {0: 1, 1: 1}, {0: 2, 1: 1, 2: 2}], 0), [0, 1, 2]) def test_obj(self): class O: def __init__(self, x): self.x = x self.assertEqual( pluck([O(1), O(2), O(3)], 'x'), [1, 2, 3])
mit
wujuguang/sentry
tests/integration/tests.py
9
10414
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function import datetime import json import logging import mock import zlib from django.conf import settings from django.core.urlresolvers import reverse from django.test.utils import override_settings from django.utils import timezone from gzip import GzipFile from exam import fixture from raven import Client from sentry.models import Group, Event from sentry.testutils import TestCase, TransactionTestCase from sentry.testutils.helpers import get_auth_header from sentry.utils.compat import StringIO from sentry.utils.settings import ( validate_settings, ConfigurationError, import_string) DEPENDENCY_TEST_DATA = { "postgresql": ('DATABASES', 'psycopg2.extensions', "database engine", "django.db.backends.postgresql_psycopg2", { 'default': { 'ENGINE': "django.db.backends.postgresql_psycopg2", 'NAME': 'test', 'USER': 'root', 'PASSWORD': '', 'HOST': 'localhost', 'PORT': '' } }), "mysql": ('DATABASES', 'MySQLdb', "database engine", "django.db.backends.mysql", { 'default': { 'ENGINE': "django.db.backends.mysql", 'NAME': 'test', 'USER': 'root', 'PASSWORD': '', 'HOST': 'localhost', 'PORT': '' } }), "oracle": ('DATABASES', 'cx_Oracle', "database engine", "django.db.backends.oracle", { 'default': { 'ENGINE': "django.db.backends.oracle", 'NAME': 'test', 'USER': 'root', 'PASSWORD': '', 'HOST': 'localhost', 'PORT': '' } }), "memcache": ('CACHES', 'memcache', "caching backend", "django.core.cache.backends.memcached.MemcachedCache", { 'default': { 'BACKEND': "django.core.cache.backends.memcached.MemcachedCache", 'LOCATION': '127.0.0.1:11211', } }), "pylibmc": ('CACHES', 'pylibmc', "caching backend", "django.core.cache.backends.memcached.PyLibMCCache", { 'default': { 'BACKEND': "django.core.cache.backends.memcached.PyLibMCCache", 'LOCATION': '127.0.0.1:11211', } }), } class AssertHandler(logging.Handler): def emit(self, entry): raise AssertionError(entry.message) class RavenIntegrationTest(TransactionTestCase): """ 
This mocks the test server and specifically tests behavior that would happen between Raven <--> Sentry over HTTP communication. """ def setUp(self): self.user = self.create_user('coreapi@example.com') self.project = self.create_project() self.pm = self.project.team.member_set.get_or_create(user=self.user)[0] self.pk = self.project.key_set.get_or_create()[0] self.configure_sentry_errors() def configure_sentry_errors(self): assert_handler = AssertHandler() sentry_errors = logging.getLogger('sentry.errors') sentry_errors.addHandler(assert_handler) sentry_errors.setLevel(logging.DEBUG) def remove_handler(): sentry_errors.handlers.pop(sentry_errors.handlers.index(assert_handler)) self.addCleanup(remove_handler) def sendRemote(self, url, data, headers={}): content_type = headers.pop('Content-Type', None) headers = dict(('HTTP_' + k.replace('-', '_').upper(), v) for k, v in headers.iteritems()) resp = self.client.post( reverse('sentry-api-store', args=[self.pk.project_id]), data=data, content_type=content_type, **headers) self.assertEquals(resp.status_code, 200, resp.content) @mock.patch('raven.base.Client.send_remote') def test_basic(self, send_remote): send_remote.side_effect = self.sendRemote client = Client( dsn='http://%s:%s@localhost:8000/%s' % ( self.pk.public_key, self.pk.secret_key, self.pk.project_id) ) with self.tasks(): client.capture('Message', message='foo') send_remote.assert_called_once() self.assertEquals(Group.objects.count(), 1) group = Group.objects.get() self.assertEquals(group.event_set.count(), 1) instance = group.event_set.get() self.assertEquals(instance.message, 'foo') class SentryRemoteTest(TestCase): @fixture def path(self): return reverse('sentry-api-store') def test_minimal(self): kwargs = {'message': 'hello'} resp = self._postWithHeader(kwargs) assert resp.status_code == 200, resp.content event_id = json.loads(resp.content)['id'] instance = Event.objects.get(event_id=event_id) assert instance.message == 'hello' def test_timestamp(self): 
timestamp = timezone.now().replace(microsecond=0, tzinfo=timezone.utc) - datetime.timedelta(hours=1) kwargs = {u'message': 'hello', 'timestamp': timestamp.strftime('%s.%f')} resp = self._postWithSignature(kwargs) self.assertEquals(resp.status_code, 200, resp.content) instance = Event.objects.get() self.assertEquals(instance.message, 'hello') self.assertEquals(instance.datetime, timestamp) group = instance.group self.assertEquals(group.first_seen, timestamp) self.assertEquals(group.last_seen, timestamp) def test_timestamp_as_iso(self): timestamp = timezone.now().replace(microsecond=0, tzinfo=timezone.utc) - datetime.timedelta(hours=1) kwargs = {u'message': 'hello', 'timestamp': timestamp.strftime('%Y-%m-%dT%H:%M:%S.%f')} resp = self._postWithSignature(kwargs) self.assertEquals(resp.status_code, 200, resp.content) instance = Event.objects.get() self.assertEquals(instance.message, 'hello') self.assertEquals(instance.datetime, timestamp) group = instance.group self.assertEquals(group.first_seen, timestamp) self.assertEquals(group.last_seen, timestamp) def test_ungzipped_data(self): kwargs = {'message': 'hello'} resp = self._postWithSignature(kwargs) self.assertEquals(resp.status_code, 200) instance = Event.objects.get() self.assertEquals(instance.message, 'hello') @override_settings(SENTRY_ALLOW_ORIGIN='getsentry.com') def test_correct_data_with_get(self): kwargs = {'message': 'hello'} resp = self._getWithReferer(kwargs) self.assertEquals(resp.status_code, 200, resp.content) instance = Event.objects.get() self.assertEquals(instance.message, 'hello') @override_settings(SENTRY_ALLOW_ORIGIN='getsentry.com') def test_get_without_referer(self): kwargs = {'message': 'hello'} resp = self._getWithReferer(kwargs, referer=None, protocol='4') self.assertEquals(resp.status_code, 400, resp.content) @override_settings(SENTRY_ALLOW_ORIGIN='*') def test_get_without_referer_allowed(self): kwargs = {'message': 'hello'} resp = self._getWithReferer(kwargs, referer=None, protocol='4') 
self.assertEquals(resp.status_code, 200, resp.content) def test_signature(self): kwargs = {'message': 'hello'} resp = self._postWithSignature(kwargs) self.assertEquals(resp.status_code, 200, resp.content) instance = Event.objects.get() self.assertEquals(instance.message, 'hello') def test_content_encoding_deflate(self): kwargs = {'message': 'hello'} message = zlib.compress(json.dumps(kwargs)) key = self.projectkey.public_key secret = self.projectkey.secret_key with self.tasks(): resp = self.client.post( self.path, message, content_type='application/octet-stream', HTTP_CONTENT_ENCODING='deflate', HTTP_X_SENTRY_AUTH=get_auth_header('_postWithHeader', key, secret), ) assert resp.status_code == 200, resp.content event_id = json.loads(resp.content)['id'] instance = Event.objects.get(event_id=event_id) assert instance.message == 'hello' def test_content_encoding_gzip(self): kwargs = {'message': 'hello'} message = json.dumps(kwargs) fp = StringIO() try: f = GzipFile(fileobj=fp, mode='w') f.write(message) finally: f.close() key = self.projectkey.public_key secret = self.projectkey.secret_key with self.tasks(): resp = self.client.post( self.path, fp.getvalue(), content_type='application/octet-stream', HTTP_CONTENT_ENCODING='gzip', HTTP_X_SENTRY_AUTH=get_auth_header('_postWithHeader', key, secret), ) assert resp.status_code == 200, resp.content event_id = json.loads(resp.content)['id'] instance = Event.objects.get(event_id=event_id) assert instance.message == 'hello' class DepdendencyTest(TestCase): def raise_import_error(self, package): def callable(package_name): if package_name != package: return import_string(package_name) raise ImportError("No module named %s" % (package,)) return callable @mock.patch('django.conf.settings', mock.Mock()) @mock.patch('sentry.utils.settings.import_string') def validate_dependency(self, key, package, dependency_type, dependency, setting_value, import_string): import_string.side_effect = self.raise_import_error(package) with 
self.settings(**{key: setting_value}): with self.assertRaises(ConfigurationError): validate_settings(settings) def test_validate_fails_on_postgres(self): self.validate_dependency(*DEPENDENCY_TEST_DATA['postgresql']) def test_validate_fails_on_mysql(self): self.validate_dependency(*DEPENDENCY_TEST_DATA['mysql']) def test_validate_fails_on_oracle(self): self.validate_dependency(*DEPENDENCY_TEST_DATA['oracle']) def test_validate_fails_on_memcache(self): self.validate_dependency(*DEPENDENCY_TEST_DATA['memcache']) def test_validate_fails_on_pylibmc(self): self.validate_dependency(*DEPENDENCY_TEST_DATA['pylibmc'])
bsd-3-clause
SlothMellow/android_kernel_moto_shamu
tools/perf/scripts/python/syscall-counts.py
11181
1522
# system call counts # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import syscall_name usage = "perf script -s syscall-counts.py [comm]\n"; for_comm = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_syscall_totals() def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(): if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "-----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): print "%-40s %10d\n" % (syscall_name(id), val),
gpl-2.0
yavwa/Shilling
test/functional/rpc_decodescript.py
13
13449
#!/usr/bin/env python3 # Copyright (c) 2015-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test decoding scripts via decodescript RPC command.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.mininode import * from io import BytesIO class DecodeScriptTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def decodescript_script_sig(self): signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001' push_signature = '48' + signature public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2' push_public_key = '21' + public_key # below are test cases for all of the standard transaction types # 1) P2PK scriptSig # the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack rpc_result = self.nodes[0].decodescript(push_signature) assert_equal(signature, rpc_result['asm']) # 2) P2PKH scriptSig rpc_result = self.nodes[0].decodescript(push_signature + push_public_key) assert_equal(signature + ' ' + public_key, rpc_result['asm']) # 3) multisig scriptSig # this also tests the leading portion of a P2SH multisig scriptSig # OP_0 <A sig> <B sig> rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature) assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm']) # 4) P2SH scriptSig # an empty P2SH redeemScript is valid and makes for a very simple test case. # thus, such a spending scriptSig would just need to pass the outer redeemScript # hash test and leave true on the top of the stack. rpc_result = self.nodes[0].decodescript('5100') assert_equal('1 0', rpc_result['asm']) # 5) null data scriptSig - no such thing because null data scripts can not be spent. 
# thus, no test case for that standard transaction type is here. def decodescript_script_pub_key(self): public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2' push_public_key = '21' + public_key public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777' push_public_key_hash = '14' + public_key_hash # below are test cases for all of the standard transaction types # 1) P2PK scriptPubKey # <pubkey> OP_CHECKSIG rpc_result = self.nodes[0].decodescript(push_public_key + 'ac') assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm']) # 2) P2PKH scriptPubKey # OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac') assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm']) # 3) multisig scriptPubKey # <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG # just imagine that the pub keys used below are different. # for our purposes here it does not matter that they are the same even though it is unrealistic. rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_public_key + push_public_key + '53ae') assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm']) # 4) P2SH scriptPubKey # OP_HASH160 <Hash160(redeemScript)> OP_EQUAL. # push_public_key_hash here should actually be the hash of a redeem script. # but this works the same for purposes of this test. rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87') assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm']) # 5) null data scriptPubKey # use a signature look-alike here to make sure that we do not decode random data as a signature. # this matters if/when signature sighash decoding comes along. # would want to make sure that no such decoding takes place in this case. 
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001' # OP_RETURN <data> rpc_result = self.nodes[0].decodescript('6a' + signature_imposter) assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm']) # 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here. # OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY. # just imagine that the pub keys used below are different. # for our purposes here it does not matter that they are the same even though it is unrealistic. # # OP_IF # <receiver-pubkey> OP_CHECKSIGVERIFY # OP_ELSE # <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP # OP_ENDIF # <sender-pubkey> OP_CHECKSIG # # lock until block 500,000 rpc_result = self.nodes[0].decodescript('63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac') assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm']) def decoderawtransaction_asm_sighashtype(self): """Test decoding scripts via RPC command "decoderawtransaction". This test is in with the "decodescript" tests because they are testing the same "asm" script decodes. 
""" # this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm']) # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs. # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc # verify that we have not altered scriptPubKey decoding. 
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid']) assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm']) assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) txSave = CTransaction() txSave.deserialize(BytesIO(hex_str_to_bytes(tx))) # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type tx = 
'01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm']) # verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) # some more full transaction tests of varying specific scriptSigs. 
used instead of # tests in decodescript_script_sig because the decodescript RPC is specifically # for working on scriptPubKeys (argh!). push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)] signature = push_signature[2:] der_signature = signature[:-2] signature_sighash_decoded = der_signature + '[ALL]' signature_2 = der_signature + '82' push_signature_2 = '48' + signature_2 signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]' # 1) P2PK scriptSig txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature) rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # make sure that the sighash decodes come out correctly for a more complex / lesser used case. txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2) rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # 2) multisig scriptSig txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2) rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # 3) test a scriptSig that contains more than push operations. # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it. 
txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101') rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize())) assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm']) def run_test(self): self.decodescript_script_sig() self.decodescript_script_pub_key() self.decoderawtransaction_asm_sighashtype() if __name__ == '__main__': DecodeScriptTest().main()
mit
yoziru-desu/airflow
airflow/operators/sqlite_operator.py
9
1088
import logging from airflow.hooks import SqliteHook from airflow.models import BaseOperator from airflow.utils import apply_defaults class SqliteOperator(BaseOperator): """ Executes sql code in a specific Sqlite database :param sqlite_conn_id: reference to a specific sqlite database :type sqlite_conn_id: string :param sql: the sql code to be executed :type sql: string or string pointing to a template file. File must have a '.sql' extensions. """ template_fields = ('sql',) template_ext = ('.sql',) ui_color = '#cdaaed' @apply_defaults def __init__( self, sql, sqlite_conn_id='sqlite_default', parameters=None, *args, **kwargs): super(SqliteOperator, self).__init__(*args, **kwargs) self.sqlite_conn_id = sqlite_conn_id self.sql = sql self.parameters = parameters or [] def execute(self, context): logging.info('Executing: ' + self.sql) hook = SqliteHook(sqlite_conn_id=self.sqlite_conn_id) hook.run(self.sql, parameters=self.parameters)
apache-2.0
fkolacek/FIT-VUT
bp-revok/python/lib/python2.7/test/test_iter.py
39
28949
# Test iterators. import unittest from test.test_support import run_unittest, TESTFN, unlink, have_unicode, \ check_py3k_warnings, cpython_only # Test result of triple loop (too big to inline) TRIPLETS = [(0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 2, 0), (0, 2, 1), (0, 2, 2), (1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 1, 0), (1, 1, 1), (1, 1, 2), (1, 2, 0), (1, 2, 1), (1, 2, 2), (2, 0, 0), (2, 0, 1), (2, 0, 2), (2, 1, 0), (2, 1, 1), (2, 1, 2), (2, 2, 0), (2, 2, 1), (2, 2, 2)] # Helper classes class BasicIterClass: def __init__(self, n): self.n = n self.i = 0 def next(self): res = self.i if res >= self.n: raise StopIteration self.i = res + 1 return res class IteratingSequenceClass: def __init__(self, n): self.n = n def __iter__(self): return BasicIterClass(self.n) class SequenceClass: def __init__(self, n): self.n = n def __getitem__(self, i): if 0 <= i < self.n: return i else: raise IndexError # Main test suite class TestCase(unittest.TestCase): # Helper to check that an iterator returns a given sequence def check_iterator(self, it, seq): res = [] while 1: try: val = it.next() except StopIteration: break res.append(val) self.assertEqual(res, seq) # Helper to check that a for loop generates a given sequence def check_for_loop(self, expr, seq): res = [] for val in expr: res.append(val) self.assertEqual(res, seq) # Test basic use of iter() function def test_iter_basic(self): self.check_iterator(iter(range(10)), range(10)) # Test that iter(iter(x)) is the same as iter(x) def test_iter_idempotency(self): seq = range(10) it = iter(seq) it2 = iter(it) self.assertTrue(it is it2) # Test that for loops over iterators work def test_iter_for_loop(self): self.check_for_loop(iter(range(10)), range(10)) # Test several independent iterators over the same list def test_iter_independence(self): seq = range(3) res = [] for i in iter(seq): for j in iter(seq): for k in iter(seq): res.append((i, j, k)) self.assertEqual(res, TRIPLETS) # Test triple list comprehension 
using iterators def test_nested_comprehensions_iter(self): seq = range(3) res = [(i, j, k) for i in iter(seq) for j in iter(seq) for k in iter(seq)] self.assertEqual(res, TRIPLETS) # Test triple list comprehension without iterators def test_nested_comprehensions_for(self): seq = range(3) res = [(i, j, k) for i in seq for j in seq for k in seq] self.assertEqual(res, TRIPLETS) # Test a class with __iter__ in a for loop def test_iter_class_for(self): self.check_for_loop(IteratingSequenceClass(10), range(10)) # Test a class with __iter__ with explicit iter() def test_iter_class_iter(self): self.check_iterator(iter(IteratingSequenceClass(10)), range(10)) # Test for loop on a sequence class without __iter__ def test_seq_class_for(self): self.check_for_loop(SequenceClass(10), range(10)) # Test iter() on a sequence class without __iter__ def test_seq_class_iter(self): self.check_iterator(iter(SequenceClass(10)), range(10)) # Test a new_style class with __iter__ but no next() method def test_new_style_iter_class(self): class IterClass(object): def __iter__(self): return self self.assertRaises(TypeError, iter, IterClass()) # Test two-argument iter() with callable instance def test_iter_callable(self): class C: def __init__(self): self.i = 0 def __call__(self): i = self.i self.i = i + 1 if i > 100: raise IndexError # Emergency stop return i self.check_iterator(iter(C(), 10), range(10)) # Test two-argument iter() with function def test_iter_function(self): def spam(state=[0]): i = state[0] state[0] = i+1 return i self.check_iterator(iter(spam, 10), range(10)) # Test two-argument iter() with function that raises StopIteration def test_iter_function_stop(self): def spam(state=[0]): i = state[0] if i == 10: raise StopIteration state[0] = i+1 return i self.check_iterator(iter(spam, 20), range(10)) # Test exception propagation through function iterator def test_exception_function(self): def spam(state=[0]): i = state[0] state[0] = i+1 if i == 10: raise RuntimeError return i res = 
[] try: for x in iter(spam, 20): res.append(x) except RuntimeError: self.assertEqual(res, range(10)) else: self.fail("should have raised RuntimeError") # Test exception propagation through sequence iterator def test_exception_sequence(self): class MySequenceClass(SequenceClass): def __getitem__(self, i): if i == 10: raise RuntimeError return SequenceClass.__getitem__(self, i) res = [] try: for x in MySequenceClass(20): res.append(x) except RuntimeError: self.assertEqual(res, range(10)) else: self.fail("should have raised RuntimeError") # Test for StopIteration from __getitem__ def test_stop_sequence(self): class MySequenceClass(SequenceClass): def __getitem__(self, i): if i == 10: raise StopIteration return SequenceClass.__getitem__(self, i) self.check_for_loop(MySequenceClass(20), range(10)) # Test a big range def test_iter_big_range(self): self.check_for_loop(iter(range(10000)), range(10000)) # Test an empty list def test_iter_empty(self): self.check_for_loop(iter([]), []) # Test a tuple def test_iter_tuple(self): self.check_for_loop(iter((0,1,2,3,4,5,6,7,8,9)), range(10)) # Test an xrange def test_iter_xrange(self): self.check_for_loop(iter(xrange(10)), range(10)) # Test a string def test_iter_string(self): self.check_for_loop(iter("abcde"), ["a", "b", "c", "d", "e"]) # Test a Unicode string if have_unicode: def test_iter_unicode(self): self.check_for_loop(iter(unicode("abcde")), [unicode("a"), unicode("b"), unicode("c"), unicode("d"), unicode("e")]) # Test a directory def test_iter_dict(self): dict = {} for i in range(10): dict[i] = None self.check_for_loop(dict, dict.keys()) # Test a file def test_iter_file(self): f = open(TESTFN, "w") try: for i in range(5): f.write("%d\n" % i) finally: f.close() f = open(TESTFN, "r") try: self.check_for_loop(f, ["0\n", "1\n", "2\n", "3\n", "4\n"]) self.check_for_loop(f, []) finally: f.close() try: unlink(TESTFN) except OSError: pass # Test list()'s use of iterators. 
def test_builtin_list(self): self.assertEqual(list(SequenceClass(5)), range(5)) self.assertEqual(list(SequenceClass(0)), []) self.assertEqual(list(()), []) self.assertEqual(list(range(10, -1, -1)), range(10, -1, -1)) d = {"one": 1, "two": 2, "three": 3} self.assertEqual(list(d), d.keys()) self.assertRaises(TypeError, list, list) self.assertRaises(TypeError, list, 42) f = open(TESTFN, "w") try: for i in range(5): f.write("%d\n" % i) finally: f.close() f = open(TESTFN, "r") try: self.assertEqual(list(f), ["0\n", "1\n", "2\n", "3\n", "4\n"]) f.seek(0, 0) self.assertEqual(list(f), ["0\n", "1\n", "2\n", "3\n", "4\n"]) finally: f.close() try: unlink(TESTFN) except OSError: pass # Test tuples()'s use of iterators. def test_builtin_tuple(self): self.assertEqual(tuple(SequenceClass(5)), (0, 1, 2, 3, 4)) self.assertEqual(tuple(SequenceClass(0)), ()) self.assertEqual(tuple([]), ()) self.assertEqual(tuple(()), ()) self.assertEqual(tuple("abc"), ("a", "b", "c")) d = {"one": 1, "two": 2, "three": 3} self.assertEqual(tuple(d), tuple(d.keys())) self.assertRaises(TypeError, tuple, list) self.assertRaises(TypeError, tuple, 42) f = open(TESTFN, "w") try: for i in range(5): f.write("%d\n" % i) finally: f.close() f = open(TESTFN, "r") try: self.assertEqual(tuple(f), ("0\n", "1\n", "2\n", "3\n", "4\n")) f.seek(0, 0) self.assertEqual(tuple(f), ("0\n", "1\n", "2\n", "3\n", "4\n")) finally: f.close() try: unlink(TESTFN) except OSError: pass # Test filter()'s use of iterators. 
def test_builtin_filter(self): self.assertEqual(filter(None, SequenceClass(5)), range(1, 5)) self.assertEqual(filter(None, SequenceClass(0)), []) self.assertEqual(filter(None, ()), ()) self.assertEqual(filter(None, "abc"), "abc") d = {"one": 1, "two": 2, "three": 3} self.assertEqual(filter(None, d), d.keys()) self.assertRaises(TypeError, filter, None, list) self.assertRaises(TypeError, filter, None, 42) class Boolean: def __init__(self, truth): self.truth = truth def __nonzero__(self): return self.truth bTrue = Boolean(1) bFalse = Boolean(0) class Seq: def __init__(self, *args): self.vals = args def __iter__(self): class SeqIter: def __init__(self, vals): self.vals = vals self.i = 0 def __iter__(self): return self def next(self): i = self.i self.i = i + 1 if i < len(self.vals): return self.vals[i] else: raise StopIteration return SeqIter(self.vals) seq = Seq(*([bTrue, bFalse] * 25)) self.assertEqual(filter(lambda x: not x, seq), [bFalse]*25) self.assertEqual(filter(lambda x: not x, iter(seq)), [bFalse]*25) # Test max() and min()'s use of iterators. def test_builtin_max_min(self): self.assertEqual(max(SequenceClass(5)), 4) self.assertEqual(min(SequenceClass(5)), 0) self.assertEqual(max(8, -1), 8) self.assertEqual(min(8, -1), -1) d = {"one": 1, "two": 2, "three": 3} self.assertEqual(max(d), "two") self.assertEqual(min(d), "one") self.assertEqual(max(d.itervalues()), 3) self.assertEqual(min(iter(d.itervalues())), 1) f = open(TESTFN, "w") try: f.write("medium line\n") f.write("xtra large line\n") f.write("itty-bitty line\n") finally: f.close() f = open(TESTFN, "r") try: self.assertEqual(min(f), "itty-bitty line\n") f.seek(0, 0) self.assertEqual(max(f), "xtra large line\n") finally: f.close() try: unlink(TESTFN) except OSError: pass # Test map()'s use of iterators. 
def test_builtin_map(self): self.assertEqual(map(lambda x: x+1, SequenceClass(5)), range(1, 6)) d = {"one": 1, "two": 2, "three": 3} self.assertEqual(map(lambda k, d=d: (k, d[k]), d), d.items()) dkeys = d.keys() expected = [(i < len(d) and dkeys[i] or None, i, i < len(d) and dkeys[i] or None) for i in range(5)] # Deprecated map(None, ...) with check_py3k_warnings(): self.assertEqual(map(None, SequenceClass(5)), range(5)) self.assertEqual(map(None, d), d.keys()) self.assertEqual(map(None, d, SequenceClass(5), iter(d.iterkeys())), expected) f = open(TESTFN, "w") try: for i in range(10): f.write("xy" * i + "\n") # line i has len 2*i+1 finally: f.close() f = open(TESTFN, "r") try: self.assertEqual(map(len, f), range(1, 21, 2)) finally: f.close() try: unlink(TESTFN) except OSError: pass # Test zip()'s use of iterators. def test_builtin_zip(self): self.assertEqual(zip(), []) self.assertEqual(zip(*[]), []) self.assertEqual(zip(*[(1, 2), 'ab']), [(1, 'a'), (2, 'b')]) self.assertRaises(TypeError, zip, None) self.assertRaises(TypeError, zip, range(10), 42) self.assertRaises(TypeError, zip, range(10), zip) self.assertEqual(zip(IteratingSequenceClass(3)), [(0,), (1,), (2,)]) self.assertEqual(zip(SequenceClass(3)), [(0,), (1,), (2,)]) d = {"one": 1, "two": 2, "three": 3} self.assertEqual(d.items(), zip(d, d.itervalues())) # Generate all ints starting at constructor arg. class IntsFrom: def __init__(self, start): self.i = start def __iter__(self): return self def next(self): i = self.i self.i = i+1 return i f = open(TESTFN, "w") try: f.write("a\n" "bbb\n" "cc\n") finally: f.close() f = open(TESTFN, "r") try: self.assertEqual(zip(IntsFrom(0), f, IntsFrom(-100)), [(0, "a\n", -100), (1, "bbb\n", -99), (2, "cc\n", -98)]) finally: f.close() try: unlink(TESTFN) except OSError: pass self.assertEqual(zip(xrange(5)), [(i,) for i in range(5)]) # Classes that lie about their lengths. 
class NoGuessLen5: def __getitem__(self, i): if i >= 5: raise IndexError return i class Guess3Len5(NoGuessLen5): def __len__(self): return 3 class Guess30Len5(NoGuessLen5): def __len__(self): return 30 self.assertEqual(len(Guess3Len5()), 3) self.assertEqual(len(Guess30Len5()), 30) self.assertEqual(zip(NoGuessLen5()), zip(range(5))) self.assertEqual(zip(Guess3Len5()), zip(range(5))) self.assertEqual(zip(Guess30Len5()), zip(range(5))) expected = [(i, i) for i in range(5)] for x in NoGuessLen5(), Guess3Len5(), Guess30Len5(): for y in NoGuessLen5(), Guess3Len5(), Guess30Len5(): self.assertEqual(zip(x, y), expected) # Test reduces()'s use of iterators. def test_deprecated_builtin_reduce(self): with check_py3k_warnings(): self._test_builtin_reduce() def _test_builtin_reduce(self): from operator import add self.assertEqual(reduce(add, SequenceClass(5)), 10) self.assertEqual(reduce(add, SequenceClass(5), 42), 52) self.assertRaises(TypeError, reduce, add, SequenceClass(0)) self.assertEqual(reduce(add, SequenceClass(0), 42), 42) self.assertEqual(reduce(add, SequenceClass(1)), 0) self.assertEqual(reduce(add, SequenceClass(1), 42), 42) d = {"one": 1, "two": 2, "three": 3} self.assertEqual(reduce(add, d), "".join(d.keys())) # This test case will be removed if we don't have Unicode def test_unicode_join_endcase(self): # This class inserts a Unicode object into its argument's natural # iteration, in the 3rd position. class OhPhooey: def __init__(self, seq): self.it = iter(seq) self.i = 0 def __iter__(self): return self def next(self): i = self.i self.i = i+1 if i == 2: return unicode("fooled you!") return self.it.next() f = open(TESTFN, "w") try: f.write("a\n" + "b\n" + "c\n") finally: f.close() f = open(TESTFN, "r") # Nasty: string.join(s) can't know whether unicode.join() is needed # until it's seen all of s's elements. But in this case, f's # iterator cannot be restarted. 
So what we're testing here is # whether string.join() can manage to remember everything it's seen # and pass that on to unicode.join(). try: got = " - ".join(OhPhooey(f)) self.assertEqual(got, unicode("a\n - b\n - fooled you! - c\n")) finally: f.close() try: unlink(TESTFN) except OSError: pass if not have_unicode: def test_unicode_join_endcase(self): pass # Test iterators with 'x in y' and 'x not in y'. def test_in_and_not_in(self): for sc5 in IteratingSequenceClass(5), SequenceClass(5): for i in range(5): self.assertIn(i, sc5) for i in "abc", -1, 5, 42.42, (3, 4), [], {1: 1}, 3-12j, sc5: self.assertNotIn(i, sc5) self.assertRaises(TypeError, lambda: 3 in 12) self.assertRaises(TypeError, lambda: 3 not in map) d = {"one": 1, "two": 2, "three": 3, 1j: 2j} for k in d: self.assertIn(k, d) self.assertNotIn(k, d.itervalues()) for v in d.values(): self.assertIn(v, d.itervalues()) self.assertNotIn(v, d) for k, v in d.iteritems(): self.assertIn((k, v), d.iteritems()) self.assertNotIn((v, k), d.iteritems()) f = open(TESTFN, "w") try: f.write("a\n" "b\n" "c\n") finally: f.close() f = open(TESTFN, "r") try: for chunk in "abc": f.seek(0, 0) self.assertNotIn(chunk, f) f.seek(0, 0) self.assertIn((chunk + "\n"), f) finally: f.close() try: unlink(TESTFN) except OSError: pass # Test iterators with operator.countOf (PySequence_Count). 
def test_countOf(self): from operator import countOf self.assertEqual(countOf([1,2,2,3,2,5], 2), 3) self.assertEqual(countOf((1,2,2,3,2,5), 2), 3) self.assertEqual(countOf("122325", "2"), 3) self.assertEqual(countOf("122325", "6"), 0) self.assertRaises(TypeError, countOf, 42, 1) self.assertRaises(TypeError, countOf, countOf, countOf) d = {"one": 3, "two": 3, "three": 3, 1j: 2j} for k in d: self.assertEqual(countOf(d, k), 1) self.assertEqual(countOf(d.itervalues(), 3), 3) self.assertEqual(countOf(d.itervalues(), 2j), 1) self.assertEqual(countOf(d.itervalues(), 1j), 0) f = open(TESTFN, "w") try: f.write("a\n" "b\n" "c\n" "b\n") finally: f.close() f = open(TESTFN, "r") try: for letter, count in ("a", 1), ("b", 2), ("c", 1), ("d", 0): f.seek(0, 0) self.assertEqual(countOf(f, letter + "\n"), count) finally: f.close() try: unlink(TESTFN) except OSError: pass # Test iterators with operator.indexOf (PySequence_Index). def test_indexOf(self): from operator import indexOf self.assertEqual(indexOf([1,2,2,3,2,5], 1), 0) self.assertEqual(indexOf((1,2,2,3,2,5), 2), 1) self.assertEqual(indexOf((1,2,2,3,2,5), 3), 3) self.assertEqual(indexOf((1,2,2,3,2,5), 5), 5) self.assertRaises(ValueError, indexOf, (1,2,2,3,2,5), 0) self.assertRaises(ValueError, indexOf, (1,2,2,3,2,5), 6) self.assertEqual(indexOf("122325", "2"), 1) self.assertEqual(indexOf("122325", "5"), 5) self.assertRaises(ValueError, indexOf, "122325", "6") self.assertRaises(TypeError, indexOf, 42, 1) self.assertRaises(TypeError, indexOf, indexOf, indexOf) f = open(TESTFN, "w") try: f.write("a\n" "b\n" "c\n" "d\n" "e\n") finally: f.close() f = open(TESTFN, "r") try: fiter = iter(f) self.assertEqual(indexOf(fiter, "b\n"), 1) self.assertEqual(indexOf(fiter, "d\n"), 1) self.assertEqual(indexOf(fiter, "e\n"), 0) self.assertRaises(ValueError, indexOf, fiter, "a\n") finally: f.close() try: unlink(TESTFN) except OSError: pass iclass = IteratingSequenceClass(3) for i in range(3): self.assertEqual(indexOf(iclass, i), i) 
self.assertRaises(ValueError, indexOf, iclass, -1) # Test iterators with file.writelines(). def test_writelines(self): f = file(TESTFN, "w") try: self.assertRaises(TypeError, f.writelines, None) self.assertRaises(TypeError, f.writelines, 42) f.writelines(["1\n", "2\n"]) f.writelines(("3\n", "4\n")) f.writelines({'5\n': None}) f.writelines({}) # Try a big chunk too. class Iterator: def __init__(self, start, finish): self.start = start self.finish = finish self.i = self.start def next(self): if self.i >= self.finish: raise StopIteration result = str(self.i) + '\n' self.i += 1 return result def __iter__(self): return self class Whatever: def __init__(self, start, finish): self.start = start self.finish = finish def __iter__(self): return Iterator(self.start, self.finish) f.writelines(Whatever(6, 6+2000)) f.close() f = file(TESTFN) expected = [str(i) + "\n" for i in range(1, 2006)] self.assertEqual(list(f), expected) finally: f.close() try: unlink(TESTFN) except OSError: pass # Test iterators on RHS of unpacking assignments. 
def test_unpack_iter(self): a, b = 1, 2 self.assertEqual((a, b), (1, 2)) a, b, c = IteratingSequenceClass(3) self.assertEqual((a, b, c), (0, 1, 2)) try: # too many values a, b = IteratingSequenceClass(3) except ValueError: pass else: self.fail("should have raised ValueError") try: # not enough values a, b, c = IteratingSequenceClass(2) except ValueError: pass else: self.fail("should have raised ValueError") try: # not iterable a, b, c = len except TypeError: pass else: self.fail("should have raised TypeError") a, b, c = {1: 42, 2: 42, 3: 42}.itervalues() self.assertEqual((a, b, c), (42, 42, 42)) f = open(TESTFN, "w") lines = ("a\n", "bb\n", "ccc\n") try: for line in lines: f.write(line) finally: f.close() f = open(TESTFN, "r") try: a, b, c = f self.assertEqual((a, b, c), lines) finally: f.close() try: unlink(TESTFN) except OSError: pass (a, b), (c,) = IteratingSequenceClass(2), {42: 24} self.assertEqual((a, b, c), (0, 1, 42)) @cpython_only def test_ref_counting_behavior(self): class C(object): count = 0 def __new__(cls): cls.count += 1 return object.__new__(cls) def __del__(self): cls = self.__class__ assert cls.count > 0 cls.count -= 1 x = C() self.assertEqual(C.count, 1) del x self.assertEqual(C.count, 0) l = [C(), C(), C()] self.assertEqual(C.count, 3) try: a, b = iter(l) except ValueError: pass del l self.assertEqual(C.count, 0) # Make sure StopIteration is a "sink state". # This tests various things that weren't sink states in Python 2.2.1, # plus various things that always were fine. 
def test_sinkstate_list(self): # This used to fail a = range(5) b = iter(a) self.assertEqual(list(b), range(5)) a.extend(range(5, 10)) self.assertEqual(list(b), []) def test_sinkstate_tuple(self): a = (0, 1, 2, 3, 4) b = iter(a) self.assertEqual(list(b), range(5)) self.assertEqual(list(b), []) def test_sinkstate_string(self): a = "abcde" b = iter(a) self.assertEqual(list(b), ['a', 'b', 'c', 'd', 'e']) self.assertEqual(list(b), []) def test_sinkstate_sequence(self): # This used to fail a = SequenceClass(5) b = iter(a) self.assertEqual(list(b), range(5)) a.n = 10 self.assertEqual(list(b), []) def test_sinkstate_callable(self): # This used to fail def spam(state=[0]): i = state[0] state[0] = i+1 if i == 10: raise AssertionError, "shouldn't have gotten this far" return i b = iter(spam, 5) self.assertEqual(list(b), range(5)) self.assertEqual(list(b), []) def test_sinkstate_dict(self): # XXX For a more thorough test, see towards the end of: # http://mail.python.org/pipermail/python-dev/2002-July/026512.html a = {1:1, 2:2, 0:0, 4:4, 3:3} for b in iter(a), a.iterkeys(), a.iteritems(), a.itervalues(): b = iter(a) self.assertEqual(len(list(b)), 5) self.assertEqual(list(b), []) def test_sinkstate_yield(self): def gen(): for i in range(5): yield i b = gen() self.assertEqual(list(b), range(5)) self.assertEqual(list(b), []) def test_sinkstate_range(self): a = xrange(5) b = iter(a) self.assertEqual(list(b), range(5)) self.assertEqual(list(b), []) def test_sinkstate_enumerate(self): a = range(5) e = enumerate(a) b = iter(e) self.assertEqual(list(b), zip(range(5), range(5))) self.assertEqual(list(b), []) def test_3720(self): # Avoid a crash, when an iterator deletes its next() method. class BadIterator(object): def __iter__(self): return self def next(self): del BadIterator.next return 1 try: for i in BadIterator() : pass except TypeError: pass def test_main(): run_unittest(TestCase) if __name__ == "__main__": test_main()
apache-2.0
yeming233/horizon
openstack_dashboard/dashboards/project/security_groups/tests.py
2
41700
# Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import cgi from mox3.mox import IsA import six import django from django.conf import settings from django.core.urlresolvers import reverse from django import http from horizon import exceptions from horizon import forms from openstack_dashboard import api from openstack_dashboard.dashboards.project.security_groups import tables from openstack_dashboard.test import helpers as test from openstack_dashboard.usage import quotas INDEX_URL = reverse('horizon:project:security_groups:index') SG_CREATE_URL = reverse('horizon:project:security_groups:create') SG_VIEW_PATH = 'horizon:project:security_groups:%s' SG_DETAIL_VIEW = SG_VIEW_PATH % 'detail' SG_UPDATE_VIEW = SG_VIEW_PATH % 'update' SG_ADD_RULE_VIEW = SG_VIEW_PATH % 'add_rule' SG_TEMPLATE_PATH = 'project/security_groups/%s' SG_DETAIL_TEMPLATE = SG_TEMPLATE_PATH % 'detail.html' SG_CREATE_TEMPLATE = SG_TEMPLATE_PATH % 'create.html' SG_UPDATE_TEMPLATE = SG_TEMPLATE_PATH % '_update.html' def strip_absolute_base(uri): return uri.split(settings.TESTSERVER, 1)[-1] class SecurityGroupsViewTests(test.TestCase): secgroup_backend = 'neutron' def setUp(self): super(SecurityGroupsViewTests, self).setUp() sec_group = self.security_groups.first() self.detail_url = reverse(SG_DETAIL_VIEW, args=[sec_group.id]) 
self.edit_url = reverse(SG_ADD_RULE_VIEW, args=[sec_group.id]) self.update_url = reverse(SG_UPDATE_VIEW, args=[sec_group.id]) @test.create_stubs({api.neutron: ('security_group_list',), quotas: ('tenant_quota_usages',)}) def test_index(self): sec_groups = self.security_groups.list() quota_data = self.quota_usages.first() quota_data['security_groups']['available'] = 10 api.neutron.security_group_list(IsA(http.HttpRequest)) \ .AndReturn(sec_groups) quotas.tenant_quota_usages( IsA(http.HttpRequest), targets=('security_groups', )).MultipleTimes() \ .AndReturn(quota_data) self.mox.ReplayAll() res = self.client.get(INDEX_URL) self.assertTemplateUsed(res, 'horizon/common/_data_table_view.html') # Security groups sec_groups_from_ctx = res.context['security_groups_table'].data # Context data needs to contains all items from the test data. self.assertItemsEqual(sec_groups_from_ctx, sec_groups) # Sec groups in context need to be sorted by their ``name`` attribute. # This assertion is somewhat weak since it's only meaningful as long as # the sec groups in the test data are *not* sorted by name (which is # the case as of the time of this addition). 
self.assertTrue( all([sec_groups_from_ctx[i].name <= sec_groups_from_ctx[i + 1].name for i in range(len(sec_groups_from_ctx) - 1)])) @test.create_stubs({api.neutron: ('security_group_list',), quotas: ('tenant_quota_usages',)}) def test_create_button_attributes(self): sec_groups = self.security_groups.list() quota_data = self.quota_usages.first() quota_data['security_groups']['available'] = 10 api.neutron.security_group_list( IsA(http.HttpRequest)) \ .AndReturn(sec_groups) quotas.tenant_quota_usages( IsA(http.HttpRequest), targets=('security_groups', )).MultipleTimes() \ .AndReturn(quota_data) self.mox.ReplayAll() res = self.client.get(INDEX_URL) security_groups = res.context['security_groups_table'].data self.assertItemsEqual(security_groups, self.security_groups.list()) create_action = self.getAndAssertTableAction(res, 'security_groups', 'create') self.assertEqual('Create Security Group', six.text_type(create_action.verbose_name)) self.assertIsNone(create_action.policy_rules) self.assertEqual(set(['ajax-modal']), set(create_action.classes)) url = 'horizon:project:security_groups:create' self.assertEqual(url, create_action.url) @test.create_stubs({api.neutron: ('security_group_list',), quotas: ('tenant_quota_usages',)}) def _test_create_button_disabled_when_quota_exceeded(self, network_enabled): sec_groups = self.security_groups.list() quota_data = self.quota_usages.first() quota_data['security_groups']['available'] = 0 api.neutron.security_group_list( IsA(http.HttpRequest)) \ .AndReturn(sec_groups) quotas.tenant_quota_usages( IsA(http.HttpRequest), targets=('security_groups', )).MultipleTimes() \ .AndReturn(quota_data) self.mox.ReplayAll() res = self.client.get(INDEX_URL) security_groups = res.context['security_groups_table'].data self.assertItemsEqual(security_groups, self.security_groups.list()) create_action = self.getAndAssertTableAction(res, 'security_groups', 'create') self.assertIn('disabled', create_action.classes, 'The create button should be disabled') 
def test_create_button_disabled_when_quota_exceeded_neutron_disabled(self): self._test_create_button_disabled_when_quota_exceeded(False) def test_create_button_disabled_when_quota_exceeded_neutron_enabled(self): self._test_create_button_disabled_when_quota_exceeded(True) @test.create_stubs({api.neutron: ('security_group_rule_create', 'security_group_list')}) def _add_security_group_rule_fixture(self, **kwargs): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.first() api.neutron.security_group_rule_create( IsA(http.HttpRequest), kwargs.get('sec_group', sec_group.id), kwargs.get('ingress', 'ingress'), kwargs.get('ethertype', 'IPv4'), kwargs.get('ip_protocol', rule.ip_protocol), kwargs.get('from_port', int(rule.from_port)), kwargs.get('to_port', int(rule.to_port)), kwargs.get('cidr', rule.ip_range['cidr']), kwargs.get('security_group', u'%s' % sec_group.id)).AndReturn(rule) api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) return sec_group, rule @test.create_stubs({api.neutron: ('security_group_get',)}) def test_update_security_groups_get(self): sec_group = self.security_groups.first() api.neutron.security_group_get(IsA(http.HttpRequest), sec_group.id).AndReturn(sec_group) self.mox.ReplayAll() res = self.client.get(self.update_url) self.assertTemplateUsed(res, SG_UPDATE_TEMPLATE) self.assertEqual(res.context['security_group'].name, sec_group.name) @test.create_stubs({api.neutron: ('security_group_update', 'security_group_get')}) def test_update_security_groups_post(self): """Ensure that we can change a group name. The name must not be restricted to alphanumeric characters. 
bug #1233501 Security group names cannot contain at characters bug #1224576 Security group names cannot contain spaces """ sec_group = self.security_groups.first() sec_group.name = "@new name" api.neutron.security_group_update( IsA(http.HttpRequest), str(sec_group.id), sec_group.name, sec_group.description).AndReturn(sec_group) api.neutron.security_group_get( IsA(http.HttpRequest), sec_group.id).AndReturn(sec_group) self.mox.ReplayAll() form_data = {'method': 'UpdateGroup', 'id': sec_group.id, 'name': sec_group.name, 'description': sec_group.description} res = self.client.post(self.update_url, form_data) self.assertRedirectsNoFollow(res, INDEX_URL) def test_create_security_groups_get(self): res = self.client.get(SG_CREATE_URL) self.assertTemplateUsed(res, SG_CREATE_TEMPLATE) def test_create_security_groups_post(self): sec_group = self.security_groups.first() self._create_security_group(sec_group) def test_create_security_groups_special_chars(self): """Ensure non-alphanumeric characters can be used as a group name. 
bug #1233501 Security group names cannot contain at characters bug #1224576 Security group names cannot contain spaces """ sec_group = self.security_groups.first() sec_group.name = '@group name-\xe3\x82\xb3' self._create_security_group(sec_group) @test.create_stubs({api.neutron: ('security_group_create',)}) def _create_security_group(self, sec_group): api.neutron.security_group_create( IsA(http.HttpRequest), sec_group.name, sec_group.description).AndReturn(sec_group) self.mox.ReplayAll() form_data = {'method': 'CreateGroup', 'name': sec_group.name, 'description': sec_group.description} res = self.client.post(SG_CREATE_URL, form_data) self.assertRedirectsNoFollow(res, INDEX_URL) @test.create_stubs({api.neutron: ('security_group_create',)}) def test_create_security_groups_post_exception(self): sec_group = self.security_groups.first() api.neutron.security_group_create( IsA(http.HttpRequest), sec_group.name, sec_group.description).AndRaise(self.exceptions.nova) self.mox.ReplayAll() formData = {'method': 'CreateGroup', 'name': sec_group.name, 'description': sec_group.description} res = self.client.post(SG_CREATE_URL, formData) self.assertMessageCount(error=1) self.assertRedirectsNoFollow(res, INDEX_URL) @test.create_stubs({api.neutron: ('security_group_get',)}) def test_detail_get(self): sec_group = self.security_groups.first() api.neutron.security_group_get(IsA(http.HttpRequest), sec_group.id).AndReturn(sec_group) self.mox.ReplayAll() res = self.client.get(self.detail_url) self.assertTemplateUsed(res, SG_DETAIL_TEMPLATE) @test.create_stubs({api.neutron: ('security_group_get',)}) def test_detail_get_exception(self): sec_group = self.security_groups.first() api.neutron.security_group_get( IsA(http.HttpRequest), sec_group.id).AndRaise(self.exceptions.nova) self.mox.ReplayAll() res = self.client.get(self.detail_url) self.assertRedirectsNoFollow(res, INDEX_URL) def test_detail_add_rule_cidr(self): sec_group, rule = self._add_security_group_rule_fixture( security_group=None) 
self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'port': rule.from_port, 'rule_menu': rule.ip_protocol, 'cidr': rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertRedirectsNoFollow(res, self.detail_url) def test_detail_add_rule_cidr_with_invalid_unused_fields(self): sec_group, rule = self._add_security_group_rule_fixture( security_group=None) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'port': rule.from_port, 'to_port': 'INVALID', 'from_port': 'INVALID', 'icmp_code': 'INVALID', 'icmp_type': 'INVALID', 'security_group': 'INVALID', 'ip_protocol': 'INVALID', 'rule_menu': rule.ip_protocol, 'cidr': rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, self.detail_url) def test_detail_add_rule_securitygroup_with_invalid_unused_fields(self): sec_group, rule = self._add_security_group_rule_fixture( cidr=None, ethertype='') self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'port': rule.from_port, 'to_port': 'INVALID', 'from_port': 'INVALID', 'icmp_code': 'INVALID', 'icmp_type': 'INVALID', 'security_group': sec_group.id, 'ip_protocol': 'INVALID', 'rule_menu': rule.ip_protocol, 'cidr': 'INVALID', 'remote': 'sg'} res = self.client.post(self.edit_url, formData) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, self.detail_url) def test_detail_add_rule_icmp_with_invalid_unused_fields(self): sec_group, rule = self._add_security_group_rule_fixture( ip_protocol='icmp', security_group=None) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'port': 'INVALID', 'to_port': 'INVALID', 'from_port': 'INVALID', 'icmp_code': rule.to_port, 'icmp_type': rule.from_port, 'security_group': sec_group.id, 'ip_protocol': 'INVALID', 
'rule_menu': 'icmp', 'cidr': rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, self.detail_url) @test.create_stubs({api.neutron: ('security_group_rule_create', 'security_group_list')}) def test_detail_add_rule_cidr_with_template(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.first() api.neutron.security_group_rule_create(IsA(http.HttpRequest), sec_group.id, 'ingress', 'IPv4', rule.ip_protocol, int(rule.from_port), int(rule.to_port), rule.ip_range['cidr'], None).AndReturn(rule) api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'rule_menu': 'http', 'port_or_range': 'port', 'cidr': rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertRedirectsNoFollow(res, self.detail_url) def _get_source_group_rule(self): for rule in self.security_group_rules.list(): if rule.group: return rule raise Exception("No matches found.") @test.create_stubs({api.neutron: ('security_group_rule_create', 'security_group_list',)}) def test_detail_add_rule_self_as_source_group(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self._get_source_group_rule() api.neutron.security_group_rule_create( IsA(http.HttpRequest), sec_group.id, 'ingress', # ethertype is empty for source_group of Nova Security Group '', rule.ip_protocol, int(rule.from_port), int(rule.to_port), None, u'%s' % sec_group.id).AndReturn(rule) api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'port': rule.from_port, 'rule_menu': rule.ip_protocol, 'cidr': '0.0.0.0/0', 'security_group': sec_group.id, 'remote': 'sg'} res = 
self.client.post(self.edit_url, formData) self.assertRedirectsNoFollow(res, self.detail_url) @test.create_stubs({api.neutron: ('security_group_rule_create', 'security_group_list',)}) def test_detail_add_rule_self_as_source_group_with_template(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self._get_source_group_rule() api.neutron.security_group_rule_create( IsA(http.HttpRequest), sec_group.id, 'ingress', # ethertype is empty for source_group of Nova Security Group '', rule.ip_protocol, int(rule.from_port), int(rule.to_port), None, u'%s' % sec_group.id).AndReturn(rule) api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'rule_menu': 'http', 'port_or_range': 'port', 'cidr': '0.0.0.0/0', 'security_group': sec_group.id, 'remote': 'sg'} res = self.client.post(self.edit_url, formData) self.assertRedirectsNoFollow(res, self.detail_url) @test.create_stubs({api.neutron: ('security_group_list',)}) def test_detail_invalid_port(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.first() api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) if django.VERSION >= (1, 9): api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'port': None, 'rule_menu': rule.ip_protocol, 'cidr': rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains(res, "The specified port is invalid") @test.create_stubs({api.neutron: ('security_group_list',)}) def test_detail_invalid_port_range(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.first() for i in range(3): 
api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) if django.VERSION >= (1, 9): for i in range(3): api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'range', 'from_port': rule.from_port, 'to_port': int(rule.from_port) - 1, 'rule_menu': rule.ip_protocol, 'cidr': rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains(res, "greater than or equal to") formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'range', 'from_port': None, 'to_port': rule.to_port, 'rule_menu': rule.ip_protocol, 'cidr': rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains(res, cgi.escape('"from" port number is invalid', quote=True)) formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'range', 'from_port': rule.from_port, 'to_port': None, 'rule_menu': rule.ip_protocol, 'cidr': rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains(res, cgi.escape('"to" port number is invalid', quote=True)) @test.create_stubs({api.neutron: ('security_group_get', 'security_group_list')}) def test_detail_invalid_icmp_rule(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() icmp_rule = self.security_group_rules.list()[1] # Call POST 5 times (*2 if Django >= 1.9) call_post = 5 if django.VERSION >= (1, 9): call_post *= 2 for i in range(call_post): api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'icmp_type': 256, 'icmp_code': icmp_rule.to_port, 'rule_menu': icmp_rule.ip_protocol, 'cidr': icmp_rule.ip_range['cidr'], 'remote': 'cidr'} 
res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains(res, "The ICMP type not in range (-1, 255)") formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'icmp_type': icmp_rule.from_port, 'icmp_code': 256, 'rule_menu': icmp_rule.ip_protocol, 'cidr': icmp_rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains(res, "The ICMP code not in range (-1, 255)") formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'icmp_type': icmp_rule.from_port, 'icmp_code': None, 'rule_menu': icmp_rule.ip_protocol, 'cidr': icmp_rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains(res, "The ICMP code not in range (-1, 255)") formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'icmp_type': None, 'icmp_code': icmp_rule.to_port, 'rule_menu': icmp_rule.ip_protocol, 'cidr': icmp_rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains(res, "The ICMP type not in range (-1, 255)") formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'icmp_type': -1, 'icmp_code': icmp_rule.to_port, 'rule_menu': icmp_rule.ip_protocol, 'cidr': icmp_rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains( res, "ICMP code is provided but ICMP type is missing.") @test.create_stubs({api.neutron: ('security_group_rule_create', 'security_group_list')}) def test_detail_add_rule_exception(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.first() api.neutron.security_group_rule_create( IsA(http.HttpRequest), sec_group.id, 'ingress', 'IPv4', rule.ip_protocol, int(rule.from_port), int(rule.to_port), 
rule.ip_range['cidr'], None).AndRaise(self.exceptions.nova) api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'port': rule.from_port, 'rule_menu': rule.ip_protocol, 'cidr': rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertRedirectsNoFollow(res, self.detail_url) @test.create_stubs({api.neutron: ('security_group_rule_create', 'security_group_list')}) def test_detail_add_rule_duplicated(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.first() api.neutron.security_group_rule_create( IsA(http.HttpRequest), sec_group.id, 'ingress', 'IPv4', rule.ip_protocol, int(rule.from_port), int(rule.to_port), rule.ip_range['cidr'], None).AndRaise(exceptions.Conflict) api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'port': rule.from_port, 'rule_menu': rule.ip_protocol, 'cidr': rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, self.detail_url) @test.create_stubs({api.neutron: ('security_group_rule_delete',)}) def test_detail_delete_rule(self): sec_group = self.security_groups.first() rule = self.security_group_rules.first() api.neutron.security_group_rule_delete(IsA(http.HttpRequest), rule.id) self.mox.ReplayAll() form_data = {"action": "rules__delete__%s" % rule.id} req = self.factory.post(self.edit_url, form_data) kwargs = {'security_group_id': sec_group.id} table = tables.RulesTable(req, sec_group.rules, **kwargs) handled = table.maybe_handle() self.assertEqual(strip_absolute_base(handled['location']), self.detail_url) @test.create_stubs({api.neutron: ('security_group_rule_delete',)}) def 
test_detail_delete_rule_exception(self): sec_group = self.security_groups.first() rule = self.security_group_rules.first() api.neutron.security_group_rule_delete( IsA(http.HttpRequest), rule.id).AndRaise(self.exceptions.nova) self.mox.ReplayAll() form_data = {"action": "rules__delete__%s" % rule.id} req = self.factory.post(self.edit_url, form_data) kwargs = {'security_group_id': sec_group.id} table = tables.RulesTable( req, self.security_group_rules.list(), **kwargs) handled = table.maybe_handle() self.assertEqual(strip_absolute_base(handled['location']), self.detail_url) @test.create_stubs({api.neutron: ('security_group_delete',)}) def test_delete_group(self): sec_group = self.security_groups.get(name="other_group") api.neutron.security_group_delete(IsA(http.HttpRequest), sec_group.id) self.mox.ReplayAll() form_data = {"action": "security_groups__delete__%s" % sec_group.id} req = self.factory.post(INDEX_URL, form_data) table = tables.SecurityGroupsTable(req, self.security_groups.list()) handled = table.maybe_handle() self.assertEqual(strip_absolute_base(handled['location']), INDEX_URL) @test.create_stubs({api.neutron: ('security_group_delete',)}) def test_delete_group_exception(self): sec_group = self.security_groups.get(name="other_group") api.neutron.security_group_delete( IsA(http.HttpRequest), sec_group.id).AndRaise(self.exceptions.nova) self.mox.ReplayAll() form_data = {"action": "security_groups__delete__%s" % sec_group.id} req = self.factory.post(INDEX_URL, form_data) table = tables.SecurityGroupsTable(req, self.security_groups.list()) handled = table.maybe_handle() self.assertEqual(strip_absolute_base(handled['location']), INDEX_URL) @test.create_stubs({api.neutron: ('security_group_rule_create', 'security_group_list')}) def test_detail_add_rule_custom_protocol(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.first() api.neutron.security_group_rule_create(IsA(http.HttpRequest), 
sec_group.id, 'ingress', 'IPv6', 37, None, None, 'fe80::/48', None).AndReturn(rule) api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'rule_menu': 'custom', 'direction': 'ingress', 'port_or_range': 'port', 'ip_protocol': 37, 'cidr': 'fe80::/48', 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertRedirectsNoFollow(res, self.detail_url) @test.create_stubs({api.neutron: ('security_group_rule_create', 'security_group_list')}) def test_detail_add_rule_egress(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.first() api.neutron.security_group_rule_create(IsA(http.HttpRequest), sec_group.id, 'egress', 'IPv4', 'udp', 80, 80, '10.1.1.0/24', None).AndReturn(rule) api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'direction': 'egress', 'rule_menu': 'udp', 'port_or_range': 'port', 'port': 80, 'cidr': '10.1.1.0/24', 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertRedirectsNoFollow(res, self.detail_url) @test.create_stubs({api.neutron: ('security_group_rule_create', 'security_group_list')}) def test_detail_add_rule_egress_with_all_tcp(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.list()[3] api.neutron.security_group_rule_create(IsA(http.HttpRequest), sec_group.id, 'egress', 'IPv4', rule.ip_protocol, int(rule.from_port), int(rule.to_port), rule.ip_range['cidr'], None).AndReturn(rule) api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'direction': 'egress', 'port_or_range': 'range', 'rule_menu': 'all_tcp', 'cidr': rule.ip_range['cidr'], 'remote': 'cidr'} res = 
self.client.post(self.edit_url, formData) self.assertRedirectsNoFollow(res, self.detail_url) @test.create_stubs({api.neutron: ('security_group_rule_create', 'security_group_list')}) def test_detail_add_rule_source_group_with_direction_ethertype(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self._get_source_group_rule() api.neutron.security_group_rule_create( IsA(http.HttpRequest), sec_group.id, 'egress', # ethertype is empty for source_group of Nova Security Group 'IPv6', rule.ip_protocol, int(rule.from_port), int(rule.to_port), None, u'%s' % sec_group.id).AndReturn(rule) api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'direction': 'egress', 'port_or_range': 'port', 'port': rule.from_port, 'rule_menu': rule.ip_protocol, 'cidr': '0.0.0.0/0', 'security_group': sec_group.id, 'remote': 'sg', 'ethertype': 'IPv6'} res = self.client.post(self.edit_url, formData) self.assertRedirectsNoFollow(res, self.detail_url) @test.update_settings( OPENSTACK_NEUTRON_NETWORK={'enable_ipv6': False}) @test.create_stubs({api.neutron: ('security_group_rule_create', 'security_group_list')}) def test_add_rule_ethertype_with_ipv6_disabled(self): self.mox.ReplayAll() res = self.client.get(self.edit_url) self.assertIsInstance( res.context['form']['ethertype'].field.widget, forms.TextInput ) self.assertIn( 'readonly', res.context['form']['ethertype'].field.widget.attrs ) self.assertEqual( res.context['form']['ethertype'].field.initial, 'IPv4' ) @test.update_settings( OPENSTACK_NEUTRON_NETWORK={'enable_ipv6': False}) @test.create_stubs({api.neutron: ('security_group_list',)}) def test_add_rule_cidr_with_ipv6_disabled(self): sec_group = self.security_groups.first() self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'rule_menu': 'custom', 'direction': 'ingress', 'port_or_range': 'port', 'ip_protocol': 37, 'cidr': 
'fe80::/48', 'etherype': 'IPv4', 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertFormError(res, 'form', 'cidr', 'Invalid version for IP address') @test.create_stubs({api.neutron: ('security_group_list',)}) def test_detail_add_rule_invalid_port(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.first() api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) if django.VERSION >= (1, 9): api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'port': -1, 'rule_menu': rule.ip_protocol, 'cidr': rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains(res, "Not a valid port number") @test.create_stubs({api.neutron: ('security_group_rule_create', 'security_group_list')}) def test_detail_add_rule_ingress_tcp_without_port(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.list()[3] api.neutron.security_group_rule_create(IsA(http.HttpRequest), sec_group.id, 'ingress', 'IPv4', 'tcp', None, None, rule.ip_range['cidr'], None).AndReturn(rule) api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'id': sec_group.id, 'direction': 'ingress', 'port_or_range': 'all', 'rule_menu': 'tcp', 'cidr': rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertRedirectsNoFollow(res, self.detail_url) @test.create_stubs({api.neutron: ('security_group_rule_create', 'security_group_list')}) def test_detail_add_rule_custom_without_protocol(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.list()[3] 
api.neutron.security_group_rule_create(IsA(http.HttpRequest), sec_group.id, 'ingress', 'IPv4', None, None, None, rule.ip_range['cidr'], None).AndReturn(rule) api.neutron.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'id': sec_group.id, 'direction': 'ingress', 'port_or_range': 'port', 'rule_menu': 'custom', 'ip_protocol': -1, 'cidr': rule.ip_range['cidr'], 'remote': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertRedirectsNoFollow(res, self.detail_url)
apache-2.0
dennis-sheil/commandergenius
project/jni/python/src/Lib/gettext.py
73
19890
"""Internationalization and localization support. This module provides internationalization (I18N) and localization (L10N) support for your Python programs by providing an interface to the GNU gettext message catalog library. I18N refers to the operation by which a program is made aware of multiple languages. L10N refers to the adaptation of your program, once internationalized, to the local language and cultural habits. """ # This module represents the integration of work, contributions, feedback, and # suggestions from the following people: # # Martin von Loewis, who wrote the initial implementation of the underlying # C-based libintlmodule (later renamed _gettext), along with a skeletal # gettext.py implementation. # # Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule, # which also included a pure-Python implementation to read .mo files if # intlmodule wasn't available. # # James Henstridge, who also wrote a gettext.py module, which has some # interesting, but currently unsupported experimental features: the notion of # a Catalog class and instances, and the ability to add to a catalog file via # a Python API. # # Barry Warsaw integrated these modules, wrote the .install() API and code, # and conformed all C and Python code to Python's coding standards. # # Francois Pinard and Marc-Andre Lemburg also contributed valuably to this # module. # # J. David Ibanez implemented plural forms. Bruno Haible fixed some bugs. # # TODO: # - Lazy loading of .mo files. Currently the entire catalog is loaded into # memory, but that's probably bad for large translated programs. Instead, # the lexical sort of original strings in GNU .mo files should be exploited # to do binary searches and lazy initializations. Or you might want to use # the undocumented double-hash algorithm for .mo files with hash tables, but # you'll need to study the GNU gettext code to do this. # # - Support Solaris .mo file formats. 
Unfortunately, we've been unable to # find this format documented anywhere. import locale, copy, os, re, struct, sys from errno import ENOENT __all__ = ['NullTranslations', 'GNUTranslations', 'Catalog', 'find', 'translation', 'install', 'textdomain', 'bindtextdomain', 'dgettext', 'dngettext', 'gettext', 'ngettext', ] _default_localedir = os.path.join(sys.prefix, 'share', 'locale') def test(condition, true, false): """ Implements the C expression: condition ? true : false Required to correctly interpret plural forms. """ if condition: return true else: return false def c2py(plural): """Gets a C expression as used in PO files for plural forms and returns a Python lambda function that implements an equivalent expression. """ # Security check, allow only the "n" identifier try: from cStringIO import StringIO except ImportError: from StringIO import StringIO import token, tokenize tokens = tokenize.generate_tokens(StringIO(plural).readline) try: danger = [x for x in tokens if x[0] == token.NAME and x[1] != 'n'] except tokenize.TokenError: raise ValueError, \ 'plural forms expression error, maybe unbalanced parenthesis' else: if danger: raise ValueError, 'plural forms expression could be dangerous' # Replace some C operators by their Python equivalents plural = plural.replace('&&', ' and ') plural = plural.replace('||', ' or ') expr = re.compile(r'\!([^=])') plural = expr.sub(' not \\1', plural) # Regular expression and replacement function used to transform # "a?b:c" to "test(a,b,c)". expr = re.compile(r'(.*?)\?(.*?):(.*)') def repl(x): return "test(%s, %s, %s)" % (x.group(1), x.group(2), expr.sub(repl, x.group(3))) # Code to transform the plural expression, taking care of parentheses stack = [''] for c in plural: if c == '(': stack.append('') elif c == ')': if len(stack) == 1: # Actually, we never reach this code, because unbalanced # parentheses get caught in the security check at the # beginning. 
raise ValueError, 'unbalanced parenthesis in plural form' s = expr.sub(repl, stack.pop()) stack[-1] += '(%s)' % s else: stack[-1] += c plural = expr.sub(repl, stack.pop()) return eval('lambda n: int(%s)' % plural) def _expand_lang(locale): from locale import normalize locale = normalize(locale) COMPONENT_CODESET = 1 << 0 COMPONENT_TERRITORY = 1 << 1 COMPONENT_MODIFIER = 1 << 2 # split up the locale into its base components mask = 0 pos = locale.find('@') if pos >= 0: modifier = locale[pos:] locale = locale[:pos] mask |= COMPONENT_MODIFIER else: modifier = '' pos = locale.find('.') if pos >= 0: codeset = locale[pos:] locale = locale[:pos] mask |= COMPONENT_CODESET else: codeset = '' pos = locale.find('_') if pos >= 0: territory = locale[pos:] locale = locale[:pos] mask |= COMPONENT_TERRITORY else: territory = '' language = locale ret = [] for i in range(mask+1): if not (i & ~mask): # if all components for this combo exist ... val = language if i & COMPONENT_TERRITORY: val += territory if i & COMPONENT_CODESET: val += codeset if i & COMPONENT_MODIFIER: val += modifier ret.append(val) ret.reverse() return ret class NullTranslations: def __init__(self, fp=None): self._info = {} self._charset = None self._output_charset = None self._fallback = None if fp is not None: self._parse(fp) def _parse(self, fp): pass def add_fallback(self, fallback): if self._fallback: self._fallback.add_fallback(fallback) else: self._fallback = fallback def gettext(self, message): if self._fallback: return self._fallback.gettext(message) return message def lgettext(self, message): if self._fallback: return self._fallback.lgettext(message) return message def ngettext(self, msgid1, msgid2, n): if self._fallback: return self._fallback.ngettext(msgid1, msgid2, n) if n == 1: return msgid1 else: return msgid2 def lngettext(self, msgid1, msgid2, n): if self._fallback: return self._fallback.lngettext(msgid1, msgid2, n) if n == 1: return msgid1 else: return msgid2 def ugettext(self, message): if 
self._fallback: return self._fallback.ugettext(message) return unicode(message) def ungettext(self, msgid1, msgid2, n): if self._fallback: return self._fallback.ungettext(msgid1, msgid2, n) if n == 1: return unicode(msgid1) else: return unicode(msgid2) def info(self): return self._info def charset(self): return self._charset def output_charset(self): return self._output_charset def set_output_charset(self, charset): self._output_charset = charset def install(self, unicode=False, names=None): import __builtin__ __builtin__.__dict__['_'] = unicode and self.ugettext or self.gettext if hasattr(names, "__contains__"): if "gettext" in names: __builtin__.__dict__['gettext'] = __builtin__.__dict__['_'] if "ngettext" in names: __builtin__.__dict__['ngettext'] = (unicode and self.ungettext or self.ngettext) if "lgettext" in names: __builtin__.__dict__['lgettext'] = self.lgettext if "lngettext" in names: __builtin__.__dict__['lngettext'] = self.lngettext class GNUTranslations(NullTranslations): # Magic number of .mo files LE_MAGIC = 0x950412deL BE_MAGIC = 0xde120495L def _parse(self, fp): """Override this method to support alternative .mo formats.""" unpack = struct.unpack filename = getattr(fp, 'name', '') # Parse the .mo file header, which consists of 5 little endian 32 # bit words. self._catalog = catalog = {} self.plural = lambda n: int(n != 1) # germanic plural by default buf = fp.read() buflen = len(buf) # Are we big endian or little endian? magic = unpack('<I', buf[:4])[0] if magic == self.LE_MAGIC: version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20]) ii = '<II' elif magic == self.BE_MAGIC: version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20]) ii = '>II' else: raise IOError(0, 'Bad magic number', filename) # Now put all messages from the .mo file buffer into the catalog # dictionary. 
for i in xrange(0, msgcount): mlen, moff = unpack(ii, buf[masteridx:masteridx+8]) mend = moff + mlen tlen, toff = unpack(ii, buf[transidx:transidx+8]) tend = toff + tlen if mend < buflen and tend < buflen: msg = buf[moff:mend] tmsg = buf[toff:tend] else: raise IOError(0, 'File is corrupt', filename) # See if we're looking at GNU .mo conventions for metadata if mlen == 0: # Catalog description lastk = k = None for item in tmsg.splitlines(): item = item.strip() if not item: continue if ':' in item: k, v = item.split(':', 1) k = k.strip().lower() v = v.strip() self._info[k] = v lastk = k elif lastk: self._info[lastk] += '\n' + item if k == 'content-type': self._charset = v.split('charset=')[1] elif k == 'plural-forms': v = v.split(';') plural = v[1].split('plural=')[1] self.plural = c2py(plural) # Note: we unconditionally convert both msgids and msgstrs to # Unicode using the character encoding specified in the charset # parameter of the Content-Type header. The gettext documentation # strongly encourages msgids to be us-ascii, but some appliations # require alternative encodings (e.g. Zope's ZCML and ZPT). For # traditional gettext applications, the msgid conversion will # cause no problems since us-ascii should always be a subset of # the charset encoding. We may want to fall back to 8-bit msgids # if the Unicode conversion fails. 
if '\x00' in msg: # Plural forms msgid1, msgid2 = msg.split('\x00') tmsg = tmsg.split('\x00') if self._charset: msgid1 = unicode(msgid1, self._charset) tmsg = [unicode(x, self._charset) for x in tmsg] for i in range(len(tmsg)): catalog[(msgid1, i)] = tmsg[i] else: if self._charset: msg = unicode(msg, self._charset) tmsg = unicode(tmsg, self._charset) catalog[msg] = tmsg # advance to next entry in the seek tables masteridx += 8 transidx += 8 def gettext(self, message): missing = object() tmsg = self._catalog.get(message, missing) if tmsg is missing: if self._fallback: return self._fallback.gettext(message) return message # Encode the Unicode tmsg back to an 8-bit string, if possible if self._output_charset: return tmsg.encode(self._output_charset) elif self._charset: return tmsg.encode(self._charset) return tmsg def lgettext(self, message): missing = object() tmsg = self._catalog.get(message, missing) if tmsg is missing: if self._fallback: return self._fallback.lgettext(message) return message if self._output_charset: return tmsg.encode(self._output_charset) return tmsg.encode(locale.getpreferredencoding()) def ngettext(self, msgid1, msgid2, n): try: tmsg = self._catalog[(msgid1, self.plural(n))] if self._output_charset: return tmsg.encode(self._output_charset) elif self._charset: return tmsg.encode(self._charset) return tmsg except KeyError: if self._fallback: return self._fallback.ngettext(msgid1, msgid2, n) if n == 1: return msgid1 else: return msgid2 def lngettext(self, msgid1, msgid2, n): try: tmsg = self._catalog[(msgid1, self.plural(n))] if self._output_charset: return tmsg.encode(self._output_charset) return tmsg.encode(locale.getpreferredencoding()) except KeyError: if self._fallback: return self._fallback.lngettext(msgid1, msgid2, n) if n == 1: return msgid1 else: return msgid2 def ugettext(self, message): missing = object() tmsg = self._catalog.get(message, missing) if tmsg is missing: if self._fallback: return self._fallback.ugettext(message) return 
unicode(message) return tmsg def ungettext(self, msgid1, msgid2, n): try: tmsg = self._catalog[(msgid1, self.plural(n))] except KeyError: if self._fallback: return self._fallback.ungettext(msgid1, msgid2, n) if n == 1: tmsg = unicode(msgid1) else: tmsg = unicode(msgid2) return tmsg # Locate a .mo file using the gettext strategy def find(domain, localedir=None, languages=None, all=0): # Get some reasonable defaults for arguments that were not supplied if localedir is None: localedir = _default_localedir if languages is None: languages = [] for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'): val = os.environ.get(envar) if val: languages = val.split(':') break if 'C' not in languages: languages.append('C') # now normalize and expand the languages nelangs = [] for lang in languages: for nelang in _expand_lang(lang): if nelang not in nelangs: nelangs.append(nelang) # select a language if all: result = [] else: result = None for lang in nelangs: if lang == 'C': break mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain) if os.path.exists(mofile): if all: result.append(mofile) else: return mofile return result # a mapping between absolute .mo file path and Translation object _translations = {} def translation(domain, localedir=None, languages=None, class_=None, fallback=False, codeset=None): if class_ is None: class_ = GNUTranslations mofiles = find(domain, localedir, languages, all=1) if not mofiles: if fallback: return NullTranslations() raise IOError(ENOENT, 'No translation file found for domain', domain) # TBD: do we need to worry about the file pointer getting collected? # Avoid opening, reading, and parsing the .mo file after it's been done # once. result = None for mofile in mofiles: key = os.path.abspath(mofile) t = _translations.get(key) if t is None: t = _translations.setdefault(key, class_(open(mofile, 'rb'))) # Copy the translation object to allow setting fallbacks and # output charset. 
All other instance data is shared with the # cached object. t = copy.copy(t) if codeset: t.set_output_charset(codeset) if result is None: result = t else: result.add_fallback(t) return result def install(domain, localedir=None, unicode=False, codeset=None, names=None): t = translation(domain, localedir, fallback=True, codeset=codeset) t.install(unicode, names) # a mapping b/w domains and locale directories _localedirs = {} # a mapping b/w domains and codesets _localecodesets = {} # current global domain, `messages' used for compatibility w/ GNU gettext _current_domain = 'messages' def textdomain(domain=None): global _current_domain if domain is not None: _current_domain = domain return _current_domain def bindtextdomain(domain, localedir=None): global _localedirs if localedir is not None: _localedirs[domain] = localedir return _localedirs.get(domain, _default_localedir) def bind_textdomain_codeset(domain, codeset=None): global _localecodesets if codeset is not None: _localecodesets[domain] = codeset return _localecodesets.get(domain) def dgettext(domain, message): try: t = translation(domain, _localedirs.get(domain, None), codeset=_localecodesets.get(domain)) except IOError: return message return t.gettext(message) def ldgettext(domain, message): try: t = translation(domain, _localedirs.get(domain, None), codeset=_localecodesets.get(domain)) except IOError: return message return t.lgettext(message) def dngettext(domain, msgid1, msgid2, n): try: t = translation(domain, _localedirs.get(domain, None), codeset=_localecodesets.get(domain)) except IOError: if n == 1: return msgid1 else: return msgid2 return t.ngettext(msgid1, msgid2, n) def ldngettext(domain, msgid1, msgid2, n): try: t = translation(domain, _localedirs.get(domain, None), codeset=_localecodesets.get(domain)) except IOError: if n == 1: return msgid1 else: return msgid2 return t.lngettext(msgid1, msgid2, n) def gettext(message): return dgettext(_current_domain, message) def lgettext(message): return 
ldgettext(_current_domain, message) def ngettext(msgid1, msgid2, n): return dngettext(_current_domain, msgid1, msgid2, n) def lngettext(msgid1, msgid2, n): return ldngettext(_current_domain, msgid1, msgid2, n) # dcgettext() has been deemed unnecessary and is not implemented. # James Henstridge's Catalog constructor from GNOME gettext. Documented usage # was: # # import gettext # cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR) # _ = cat.gettext # print _('Hello World') # The resulting catalog object currently don't support access through a # dictionary API, which was supported (but apparently unused) in GNOME # gettext. Catalog = translation
lgpl-2.1
gaborbernat/tox
src/tox/package/builder/isolated.py
2
5403
from __future__ import unicode_literals import json import os from collections import namedtuple import six from packaging.requirements import Requirement from packaging.utils import canonicalize_name from tox import reporter from tox.config import DepConfig, get_py_project_toml from tox.constants import BUILD_ISOLATED, BUILD_REQUIRE_SCRIPT BuildInfo = namedtuple( "BuildInfo", ["requires", "backend_module", "backend_object", "backend_paths"], ) def build(config, session): build_info = get_build_info(config.setupdir) package_venv = session.getvenv(config.isolated_build_env) package_venv.envconfig.deps_matches_subset = True # we allow user specified dependencies so the users can write extensions to # install additional type of dependencies (e.g. binary) user_specified_deps = package_venv.envconfig.deps package_venv.envconfig.deps = [DepConfig(r, None) for r in build_info.requires] package_venv.envconfig.deps.extend(user_specified_deps) if package_venv.setupenv(): package_venv.finishvenv() if isinstance(package_venv.status, Exception): raise package_venv.status build_requires = get_build_requires(build_info, package_venv, config.setupdir) # we need to filter out requirements already specified in pyproject.toml or user deps base_build_deps = { canonicalize_name(Requirement(r.name).name) for r in package_venv.envconfig.deps } build_requires_dep = [ DepConfig(r, None) for r in build_requires if canonicalize_name(Requirement(r).name) not in base_build_deps ] if build_requires_dep: with package_venv.new_action("build_requires", package_venv.envconfig.envdir) as action: package_venv.run_install_command(packages=build_requires_dep, action=action) package_venv.finishvenv() return perform_isolated_build(build_info, package_venv, config.distdir, config.setupdir) def get_build_info(folder): toml_file = folder.join("pyproject.toml") # as per https://www.python.org/dev/peps/pep-0517/ def abort(message): reporter.error("{} inside {}".format(message, toml_file)) raise SystemExit(1) 
if not toml_file.exists(): reporter.error("missing {}".format(toml_file)) raise SystemExit(1) config_data = get_py_project_toml(toml_file) if "build-system" not in config_data: abort("build-system section missing") build_system = config_data["build-system"] if "requires" not in build_system: abort("missing requires key at build-system section") if "build-backend" not in build_system: abort("missing build-backend key at build-system section") requires = build_system["requires"] if not isinstance(requires, list) or not all(isinstance(i, six.text_type) for i in requires): abort("requires key at build-system section must be a list of string") backend = build_system["build-backend"] if not isinstance(backend, six.text_type): abort("build-backend key at build-system section must be a string") args = backend.split(":") module = args[0] obj = args[1] if len(args) > 1 else "" backend_paths = build_system.get("backend-path", []) if not isinstance(backend_paths, list): abort("backend-path key at build-system section must be a list, if specified") backend_paths = [folder.join(p) for p in backend_paths] normalized_folder = os.path.normcase(str(folder.realpath())) normalized_paths = (os.path.normcase(str(path.realpath())) for path in backend_paths) if not all( os.path.commonprefix((normalized_folder, path)) == normalized_folder for path in normalized_paths ): abort("backend-path must exist in the project root") return BuildInfo(requires, module, obj, backend_paths) def perform_isolated_build(build_info, package_venv, dist_dir, setup_dir): with package_venv.new_action( "perform-isolated-build", package_venv.envconfig.envdir, ) as action: # need to start with an empty (but existing) source distribution folder if dist_dir.exists(): dist_dir.remove(rec=1, ignore_errors=True) dist_dir.ensure_dir() result = package_venv._pcall( [ package_venv.envconfig.envpython, BUILD_ISOLATED, str(dist_dir), build_info.backend_module, build_info.backend_object, os.path.pathsep.join(str(p) for p in 
build_info.backend_paths), ], returnout=True, action=action, cwd=setup_dir, ) reporter.verbosity2(result) return dist_dir.join(result.split("\n")[-2]) def get_build_requires(build_info, package_venv, setup_dir): with package_venv.new_action("get-build-requires", package_venv.envconfig.envdir) as action: result = package_venv._pcall( [ package_venv.envconfig.envpython, BUILD_REQUIRE_SCRIPT, build_info.backend_module, build_info.backend_object, os.path.pathsep.join(str(p) for p in build_info.backend_paths), ], returnout=True, action=action, cwd=setup_dir, ) return json.loads(result.split("\n")[-2])
mit
tralamazza/micropython
tests/basics/op_error.py
6
1386
# test errors from bad operations (unary, binary, etc) # unsupported unary operators try: ~None except TypeError: print('TypeError') try: ~'' except TypeError: print('TypeError') try: ~[] except TypeError: print('TypeError') try: ~bytearray() except TypeError: print('TypeError') # unsupported binary operators try: False in True except TypeError: print('TypeError') try: 1 * {} except TypeError: print('TypeError') try: 1 in 1 except TypeError: print('TypeError') try: bytearray() // 2 except TypeError: print('TypeError') # object with buffer protocol needed on rhs try: bytearray(1) + 1 except TypeError: print('TypeError') # unsupported subscription try: 1[0] except TypeError: print('TypeError') try: 1[0] = 1 except TypeError: print('TypeError') try: ''[''] except TypeError: print('TypeError') try: 'a'[0] = 1 except TypeError: print('TypeError') try: del 1[0] except TypeError: print('TypeError') # not callable try: 1() except TypeError: print('TypeError') # not an iterator try: next(1) except TypeError: print('TypeError') # must be an exception type try: raise 1 except TypeError: print('TypeError') # no such name in import try: from sys import youcannotimportmebecauseidontexist except ImportError: print('ImportError')
mit
Frankkkkk/arctic
tests/integration/scripts/test_list_libraries.py
5
1033
from mock import patch, call import pytest from arctic.scripts import arctic_list_libraries from ...util import run_as_main def test_list_library(mongo_host, library, library_name): with patch('arctic.scripts.arctic_list_libraries.print') as p: run_as_main(arctic_list_libraries.main, "--host", mongo_host) for x in p.call_args_list: if x == call(library_name): return assert False, "Failed to find a library" def test_list_library_args(mongo_host, library, library_name): with patch('arctic.scripts.arctic_list_libraries.print') as p: run_as_main(arctic_list_libraries.main, "--host", mongo_host, library_name[:2]) for x in p.call_args_list: assert x[0][0].startswith(library_name[:2]) def test_list_library_args_not_found(mongo_host, library, library_name): with patch('arctic.scripts.arctic_list_libraries.print') as p: run_as_main(arctic_list_libraries.main, "--host", mongo_host, 'some_library_which_doesnt_exist') assert p.call_count == 0
lgpl-2.1
sosolimited/Cinder
docs/libs/bs4/testing.py
440
24510
"""Helper classes for tests.""" import copy import functools import unittest from unittest import TestCase from bs4 import BeautifulSoup from bs4.element import ( CharsetMetaAttributeValue, Comment, ContentMetaAttributeValue, Doctype, SoupStrainer, ) from bs4.builder import HTMLParserTreeBuilder default_builder = HTMLParserTreeBuilder class SoupTest(unittest.TestCase): @property def default_builder(self): return default_builder() def soup(self, markup, **kwargs): """Build a Beautiful Soup object from markup.""" builder = kwargs.pop('builder', self.default_builder) return BeautifulSoup(markup, builder=builder, **kwargs) def document_for(self, markup): """Turn an HTML fragment into a document. The details depend on the builder. """ return self.default_builder.test_fragment_to_document(markup) def assertSoupEquals(self, to_parse, compare_parsed_to=None): builder = self.default_builder obj = BeautifulSoup(to_parse, builder=builder) if compare_parsed_to is None: compare_parsed_to = to_parse self.assertEqual(obj.decode(), self.document_for(compare_parsed_to)) class HTMLTreeBuilderSmokeTest(object): """A basic test of a treebuilder's competence. Any HTML treebuilder, present or future, should be able to pass these tests. With invalid markup, there's room for interpretation, and different parsers can handle it differently. But with the markup in these tests, there's not much room for interpretation. """ def assertDoctypeHandled(self, doctype_fragment): """Assert that a given doctype string is handled correctly.""" doctype_str, soup = self._document_with_doctype(doctype_fragment) # Make sure a Doctype object was created. doctype = soup.contents[0] self.assertEqual(doctype.__class__, Doctype) self.assertEqual(doctype, doctype_fragment) self.assertEqual(str(soup)[:len(doctype_str)], doctype_str) # Make sure that the doctype was correctly associated with the # parse tree and that the rest of the document parsed. 
self.assertEqual(soup.p.contents[0], 'foo') def _document_with_doctype(self, doctype_fragment): """Generate and parse a document with the given doctype.""" doctype = '<!DOCTYPE %s>' % doctype_fragment markup = doctype + '\n<p>foo</p>' soup = self.soup(markup) return doctype, soup def test_normal_doctypes(self): """Make sure normal, everyday HTML doctypes are handled correctly.""" self.assertDoctypeHandled("html") self.assertDoctypeHandled( 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"') def test_empty_doctype(self): soup = self.soup("<!DOCTYPE>") doctype = soup.contents[0] self.assertEqual("", doctype.strip()) def test_public_doctype_with_url(self): doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"' self.assertDoctypeHandled(doctype) def test_system_doctype(self): self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"') def test_namespaced_system_doctype(self): # We can handle a namespaced doctype with a system ID. self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"') def test_namespaced_public_doctype(self): # Test a namespaced doctype with a public id. self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"') def test_real_xhtml_document(self): """A real XHTML document should come out more or less the same as it went in.""" markup = b"""<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"> <html xmlns="http://www.w3.org/1999/xhtml"> <head><title>Hello.</title></head> <body>Goodbye.</body> </html>""" soup = self.soup(markup) self.assertEqual( soup.encode("utf-8").replace(b"\n", b""), markup.replace(b"\n", b"")) def test_deepcopy(self): """Make sure you can copy the tree builder. This is important because the builder is part of a BeautifulSoup object, and we want to be able to copy that. 
""" copy.deepcopy(self.default_builder) def test_p_tag_is_never_empty_element(self): """A <p> tag is never designated as an empty-element tag. Even if the markup shows it as an empty-element tag, it shouldn't be presented that way. """ soup = self.soup("<p/>") self.assertFalse(soup.p.is_empty_element) self.assertEqual(str(soup.p), "<p></p>") def test_unclosed_tags_get_closed(self): """A tag that's not closed by the end of the document should be closed. This applies to all tags except empty-element tags. """ self.assertSoupEquals("<p>", "<p></p>") self.assertSoupEquals("<b>", "<b></b>") self.assertSoupEquals("<br>", "<br/>") def test_br_is_always_empty_element_tag(self): """A <br> tag is designated as an empty-element tag. Some parsers treat <br></br> as one <br/> tag, some parsers as two tags, but it should always be an empty-element tag. """ soup = self.soup("<br></br>") self.assertTrue(soup.br.is_empty_element) self.assertEqual(str(soup.br), "<br/>") def test_nested_formatting_elements(self): self.assertSoupEquals("<em><em></em></em>") def test_comment(self): # Comments are represented as Comment objects. markup = "<p>foo<!--foobar-->baz</p>" self.assertSoupEquals(markup) soup = self.soup(markup) comment = soup.find(text="foobar") self.assertEqual(comment.__class__, Comment) # The comment is properly integrated into the tree. 
foo = soup.find(text="foo") self.assertEqual(comment, foo.next_element) baz = soup.find(text="baz") self.assertEqual(comment, baz.previous_element) def test_preserved_whitespace_in_pre_and_textarea(self): """Whitespace must be preserved in <pre> and <textarea> tags.""" self.assertSoupEquals("<pre> </pre>") self.assertSoupEquals("<textarea> woo </textarea>") def test_nested_inline_elements(self): """Inline elements can be nested indefinitely.""" b_tag = "<b>Inside a B tag</b>" self.assertSoupEquals(b_tag) nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>" self.assertSoupEquals(nested_b_tag) double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>" self.assertSoupEquals(nested_b_tag) def test_nested_block_level_elements(self): """Block elements can be nested.""" soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>') blockquote = soup.blockquote self.assertEqual(blockquote.p.b.string, 'Foo') self.assertEqual(blockquote.b.string, 'Foo') def test_correctly_nested_tables(self): """One table can go inside another one.""" markup = ('<table id="1">' '<tr>' "<td>Here's another table:" '<table id="2">' '<tr><td>foo</td></tr>' '</table></td>') self.assertSoupEquals( markup, '<table id="1"><tr><td>Here\'s another table:' '<table id="2"><tr><td>foo</td></tr></table>' '</td></tr></table>') self.assertSoupEquals( "<table><thead><tr><td>Foo</td></tr></thead>" "<tbody><tr><td>Bar</td></tr></tbody>" "<tfoot><tr><td>Baz</td></tr></tfoot></table>") def test_deeply_nested_multivalued_attribute(self): # html5lib can set the attributes of the same tag many times # as it rearranges the tree. This has caused problems with # multivalued attributes. 
markup = '<table><div><div class="css"></div></div></table>' soup = self.soup(markup) self.assertEqual(["css"], soup.div.div['class']) def test_angle_brackets_in_attribute_values_are_escaped(self): self.assertSoupEquals('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>') def test_entities_in_attributes_converted_to_unicode(self): expect = u'<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>' self.assertSoupEquals('<p id="pi&#241;ata"></p>', expect) self.assertSoupEquals('<p id="pi&#xf1;ata"></p>', expect) self.assertSoupEquals('<p id="pi&#Xf1;ata"></p>', expect) self.assertSoupEquals('<p id="pi&ntilde;ata"></p>', expect) def test_entities_in_text_converted_to_unicode(self): expect = u'<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>' self.assertSoupEquals("<p>pi&#241;ata</p>", expect) self.assertSoupEquals("<p>pi&#xf1;ata</p>", expect) self.assertSoupEquals("<p>pi&#Xf1;ata</p>", expect) self.assertSoupEquals("<p>pi&ntilde;ata</p>", expect) def test_quot_entity_converted_to_quotation_mark(self): self.assertSoupEquals("<p>I said &quot;good day!&quot;</p>", '<p>I said "good day!"</p>') def test_out_of_range_entity(self): expect = u"\N{REPLACEMENT CHARACTER}" self.assertSoupEquals("&#10000000000000;", expect) self.assertSoupEquals("&#x10000000000000;", expect) self.assertSoupEquals("&#1000000000;", expect) def test_multipart_strings(self): "Mostly to prevent a recurrence of a bug in the html5lib treebuilder." 
soup = self.soup("<html><h2>\nfoo</h2><p></p></html>") self.assertEqual("p", soup.h2.string.next_element.name) self.assertEqual("p", soup.p.name) def test_basic_namespaces(self): """Parsers don't need to *understand* namespaces, but at the very least they should not choke on namespaces or lose data.""" markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>' soup = self.soup(markup) self.assertEqual(markup, soup.encode()) html = soup.html self.assertEqual('http://www.w3.org/1999/xhtml', soup.html['xmlns']) self.assertEqual( 'http://www.w3.org/1998/Math/MathML', soup.html['xmlns:mathml']) self.assertEqual( 'http://www.w3.org/2000/svg', soup.html['xmlns:svg']) def test_multivalued_attribute_value_becomes_list(self): markup = b'<a class="foo bar">' soup = self.soup(markup) self.assertEqual(['foo', 'bar'], soup.a['class']) # # Generally speaking, tests below this point are more tests of # Beautiful Soup than tests of the tree builders. But parsers are # weird, so we run these tests separately for every tree builder # to detect any differences between them. # def test_can_parse_unicode_document(self): # A seemingly innocuous document... but it's in Unicode! And # it contains characters that can't be represented in the # encoding found in the declaration! The horror! 
markup = u'<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>' soup = self.soup(markup) self.assertEqual(u'Sacr\xe9 bleu!', soup.body.string) def test_soupstrainer(self): """Parsers should be able to work with SoupStrainers.""" strainer = SoupStrainer("b") soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>", parse_only=strainer) self.assertEqual(soup.decode(), "<b>bold</b>") def test_single_quote_attribute_values_become_double_quotes(self): self.assertSoupEquals("<foo attr='bar'></foo>", '<foo attr="bar"></foo>') def test_attribute_values_with_nested_quotes_are_left_alone(self): text = """<foo attr='bar "brawls" happen'>a</foo>""" self.assertSoupEquals(text) def test_attribute_values_with_double_nested_quotes_get_quoted(self): text = """<foo attr='bar "brawls" happen'>a</foo>""" soup = self.soup(text) soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"' self.assertSoupEquals( soup.foo.decode(), """<foo attr="Brawls happen at &quot;Bob\'s Bar&quot;">a</foo>""") def test_ampersand_in_attribute_value_gets_escaped(self): self.assertSoupEquals('<this is="really messed up & stuff"></this>', '<this is="really messed up &amp; stuff"></this>') self.assertSoupEquals( '<a href="http://example.org?a=1&b=2;3">foo</a>', '<a href="http://example.org?a=1&amp;b=2;3">foo</a>') def test_escaped_ampersand_in_attribute_value_is_left_alone(self): self.assertSoupEquals('<a href="http://example.org?a=1&amp;b=2;3"></a>') def test_entities_in_strings_converted_during_parsing(self): # Both XML and HTML entities are converted to Unicode characters # during parsing. text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>" expected = u"<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>" self.assertSoupEquals(text, expected) def test_smart_quotes_converted_on_the_way_in(self): # Microsoft smart quotes are converted to Unicode characters during # parsing. 
quote = b"<p>\x91Foo\x92</p>" soup = self.soup(quote) self.assertEqual( soup.p.string, u"\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}") def test_non_breaking_spaces_converted_on_the_way_in(self): soup = self.soup("<a>&nbsp;&nbsp;</a>") self.assertEqual(soup.a.string, u"\N{NO-BREAK SPACE}" * 2) def test_entities_converted_on_the_way_out(self): text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>" expected = u"<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>".encode("utf-8") soup = self.soup(text) self.assertEqual(soup.p.encode("utf-8"), expected) def test_real_iso_latin_document(self): # Smoke test of interrelated functionality, using an # easy-to-understand document. # Here it is in Unicode. Note that it claims to be in ISO-Latin-1. unicode_html = u'<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>' # That's because we're going to encode it into ISO-Latin-1, and use # that to test. iso_latin_html = unicode_html.encode("iso-8859-1") # Parse the ISO-Latin-1 HTML. soup = self.soup(iso_latin_html) # Encode it to UTF-8. result = soup.encode("utf-8") # What do we expect the result to look like? Well, it would # look like unicode_html, except that the META tag would say # UTF-8 instead of ISO-Latin-1. expected = unicode_html.replace("ISO-Latin-1", "utf-8") # And, of course, it would be in UTF-8, not Unicode. expected = expected.encode("utf-8") # Ta-da! self.assertEqual(result, expected) def test_real_shift_jis_document(self): # Smoke test to make sure the parser can handle a document in # Shift-JIS encoding, without choking. 
shift_jis_html = ( b'<html><head></head><body><pre>' b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f' b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c' b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B' b'</pre></body></html>') unicode_html = shift_jis_html.decode("shift-jis") soup = self.soup(unicode_html) # Make sure the parse tree is correctly encoded to various # encodings. self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8")) self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp")) def test_real_hebrew_document(self): # A real-world test to make sure we can convert ISO-8859-9 (a # Hebrew encoding) to UTF-8. hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>' soup = self.soup( hebrew_document, from_encoding="iso8859-8") self.assertEqual(soup.original_encoding, 'iso8859-8') self.assertEqual( soup.encode('utf-8'), hebrew_document.decode("iso8859-8").encode("utf-8")) def test_meta_tag_reflects_current_encoding(self): # Here's the <meta> tag saying that a document is # encoded in Shift-JIS. meta_tag = ('<meta content="text/html; charset=x-sjis" ' 'http-equiv="Content-type"/>') # Here's a document incorporating that meta tag. shift_jis_html = ( '<html><head>\n%s\n' '<meta http-equiv="Content-language" content="ja"/>' '</head><body>Shift-JIS markup goes here.') % meta_tag soup = self.soup(shift_jis_html) # Parse the document, and the charset is seemingly unaffected. parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'}) content = parsed_meta['content'] self.assertEqual('text/html; charset=x-sjis', content) # But that value is actually a ContentMetaAttributeValue object. self.assertTrue(isinstance(content, ContentMetaAttributeValue)) # And it will take on a value that reflects its current # encoding. 
self.assertEqual('text/html; charset=utf8', content.encode("utf8")) # For the rest of the story, see TestSubstitutions in # test_tree.py. def test_html5_style_meta_tag_reflects_current_encoding(self): # Here's the <meta> tag saying that a document is # encoded in Shift-JIS. meta_tag = ('<meta id="encoding" charset="x-sjis" />') # Here's a document incorporating that meta tag. shift_jis_html = ( '<html><head>\n%s\n' '<meta http-equiv="Content-language" content="ja"/>' '</head><body>Shift-JIS markup goes here.') % meta_tag soup = self.soup(shift_jis_html) # Parse the document, and the charset is seemingly unaffected. parsed_meta = soup.find('meta', id="encoding") charset = parsed_meta['charset'] self.assertEqual('x-sjis', charset) # But that value is actually a CharsetMetaAttributeValue object. self.assertTrue(isinstance(charset, CharsetMetaAttributeValue)) # And it will take on a value that reflects its current # encoding. self.assertEqual('utf8', charset.encode("utf8")) def test_tag_with_no_attributes_can_have_attributes_added(self): data = self.soup("<a>text</a>") data.a['foo'] = 'bar' self.assertEqual('<a foo="bar">text</a>', data.a.decode()) class XMLTreeBuilderSmokeTest(object): def test_docstring_generated(self): soup = self.soup("<root/>") self.assertEqual( soup.encode(), b'<?xml version="1.0" encoding="utf-8"?>\n<root/>') def test_real_xhtml_document(self): """A real XHTML document should come out *exactly* the same as it went in.""" markup = b"""<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"> <html xmlns="http://www.w3.org/1999/xhtml"> <head><title>Hello.</title></head> <body>Goodbye.</body> </html>""" soup = self.soup(markup) self.assertEqual( soup.encode("utf-8"), markup) def test_formatter_processes_script_tag_for_xml_documents(self): doc = """ <script type="text/javascript"> </script> """ soup = BeautifulSoup(doc, "xml") # lxml would have stripped this while parsing, but we can add # it later. 
soup.script.string = 'console.log("< < hey > > ");' encoded = soup.encode() self.assertTrue(b"&lt; &lt; hey &gt; &gt;" in encoded) def test_can_parse_unicode_document(self): markup = u'<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>' soup = self.soup(markup) self.assertEqual(u'Sacr\xe9 bleu!', soup.root.string) def test_popping_namespaced_tag(self): markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>' soup = self.soup(markup) self.assertEqual( unicode(soup.rss), markup) def test_docstring_includes_correct_encoding(self): soup = self.soup("<root/>") self.assertEqual( soup.encode("latin1"), b'<?xml version="1.0" encoding="latin1"?>\n<root/>') def test_large_xml_document(self): """A large XML document should come out the same as it went in.""" markup = (b'<?xml version="1.0" encoding="utf-8"?>\n<root>' + b'0' * (2**12) + b'</root>') soup = self.soup(markup) self.assertEqual(soup.encode("utf-8"), markup) def test_tags_are_empty_element_if_and_only_if_they_are_empty(self): self.assertSoupEquals("<p>", "<p/>") self.assertSoupEquals("<p>foo</p>") def test_namespaces_are_preserved(self): markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>' soup = self.soup(markup) root = soup.root self.assertEqual("http://example.com/", root['xmlns:a']) self.assertEqual("http://example.net/", root['xmlns:b']) def test_closing_namespaced_tag(self): markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>' soup = self.soup(markup) self.assertEqual(unicode(soup.p), markup) def test_namespaced_attributes(self): markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>' soup = self.soup(markup) self.assertEqual(unicode(soup.foo), markup) 
def test_namespaced_attributes_xml_namespace(self): markup = '<foo xml:lang="fr">bar</foo>' soup = self.soup(markup) self.assertEqual(unicode(soup.foo), markup) class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest): """Smoke test for a tree builder that supports HTML5.""" def test_real_xhtml_document(self): # Since XHTML is not HTML5, HTML5 parsers are not tested to handle # XHTML documents in any particular way. pass def test_html_tags_have_namespace(self): markup = "<a>" soup = self.soup(markup) self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace) def test_svg_tags_have_namespace(self): markup = '<svg><circle/></svg>' soup = self.soup(markup) namespace = "http://www.w3.org/2000/svg" self.assertEqual(namespace, soup.svg.namespace) self.assertEqual(namespace, soup.circle.namespace) def test_mathml_tags_have_namespace(self): markup = '<math><msqrt>5</msqrt></math>' soup = self.soup(markup) namespace = 'http://www.w3.org/1998/Math/MathML' self.assertEqual(namespace, soup.math.namespace) self.assertEqual(namespace, soup.msqrt.namespace) def test_xml_declaration_becomes_comment(self): markup = '<?xml version="1.0" encoding="utf-8"?><html></html>' soup = self.soup(markup) self.assertTrue(isinstance(soup.contents[0], Comment)) self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?') self.assertEqual("html", soup.contents[0].next_element.name) def skipIf(condition, reason): def nothing(test, *args, **kwargs): return None def decorator(test_item): if condition: return nothing else: return test_item return decorator
bsd-2-clause
zhhf/charging
charging/plugins/cisco/common/cisco_exceptions.py
11
7811
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2011 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Sumit Naiksatam, Cisco Systems, Inc. # @author: Rohit Agarwalla, Cisco Systems, Inc. """Exceptions used by the Cisco plugin.""" from neutron.common import exceptions class NetworkSegmentIDNotFound(exceptions.NeutronException): """Segmentation ID for network is not found.""" message = _("Segmentation ID for network %(net_id)s is not found.") class NoMoreNics(exceptions.NeutronException): """No more dynamic NICs are available in the system.""" message = _("Unable to complete operation. No more dynamic NICs are " "available in the system.") class NetworkVlanBindingAlreadyExists(exceptions.NeutronException): """Binding cannot be created, since it already exists.""" message = _("NetworkVlanBinding for %(vlan_id)s and network " "%(network_id)s already exists.") class VlanIDNotFound(exceptions.NeutronException): """VLAN ID cannot be found.""" message = _("Vlan ID %(vlan_id)s not found.") class VlanIDOutsidePool(exceptions.NeutronException): """VLAN ID cannot be allocated, since it is outside the configured pool.""" message = _("Unable to complete operation. 
VLAN ID exists outside of the " "configured network segment range.") class VlanIDNotAvailable(exceptions.NeutronException): """No VLAN ID available.""" message = _("No Vlan ID available.") class QosNotFound(exceptions.NeutronException): """QoS level with this ID cannot be found.""" message = _("QoS level %(qos_id)s could not be found " "for tenant %(tenant_id)s.") class QosNameAlreadyExists(exceptions.NeutronException): """QoS Name already exists.""" message = _("QoS level with name %(qos_name)s already exists " "for tenant %(tenant_id)s.") class CredentialNotFound(exceptions.NeutronException): """Credential with this ID cannot be found.""" message = _("Credential %(credential_id)s could not be found.") class CredentialNameNotFound(exceptions.NeutronException): """Credential Name could not be found.""" message = _("Credential %(credential_name)s could not be found.") class CredentialAlreadyExists(exceptions.NeutronException): """Credential already exists.""" message = _("Credential %(credential_name)s already exists.") class ProviderNetworkExists(exceptions.NeutronException): """Provider network already exists.""" message = _("Provider network %s already exists") class NexusComputeHostNotConfigured(exceptions.NeutronException): """Connection to compute host is not configured.""" message = _("Connection to %(host)s is not configured.") class NexusConnectFailed(exceptions.NeutronException): """Failed to connect to Nexus switch.""" message = _("Unable to connect to Nexus %(nexus_host)s. Reason: %(exc)s.") class NexusConfigFailed(exceptions.NeutronException): """Failed to configure Nexus switch.""" message = _("Failed to configure Nexus: %(config)s. 
Reason: %(exc)s.") class NexusPortBindingNotFound(exceptions.NeutronException): """NexusPort Binding is not present.""" message = _("Nexus Port Binding (%(filters)s) is not present.") def __init__(self, **kwargs): filters = ','.join('%s=%s' % i for i in kwargs.items()) super(NexusPortBindingNotFound, self).__init__(filters=filters) class NoNexusSviSwitch(exceptions.NeutronException): """No usable nexus switch found.""" message = _("No usable Nexus switch found to create SVI interface.") class PortVnicBindingAlreadyExists(exceptions.NeutronException): """PortVnic Binding already exists.""" message = _("PortVnic Binding %(port_id)s already exists.") class PortVnicNotFound(exceptions.NeutronException): """PortVnic Binding is not present.""" message = _("PortVnic Binding %(port_id)s is not present.") class SubnetNotSpecified(exceptions.NeutronException): """Subnet id not specified.""" message = _("No subnet_id specified for router gateway.") class SubnetInterfacePresent(exceptions.NeutronException): """Subnet SVI interface already exists.""" message = _("Subnet %(subnet_id)s has an interface on %(router_id)s.") class PortIdForNexusSvi(exceptions.NeutronException): """Port Id specified for Nexus SVI.""" message = _('Nexus hardware router gateway only uses Subnet Ids.') class InvalidDetach(exceptions.NeutronException): message = _("Unable to unplug the attachment %(att_id)s from port " "%(port_id)s for network %(net_id)s. 
The attachment " "%(att_id)s does not exist.") class PolicyProfileAlreadyExists(exceptions.NeutronException): """Policy Profile cannot be created since it already exists.""" message = _("Policy Profile %(profile_id)s " "already exists.") class PolicyProfileIdNotFound(exceptions.NotFound): """Policy Profile with the given UUID cannot be found.""" message = _("Policy Profile %(profile_id)s could not be found.") class NetworkProfileAlreadyExists(exceptions.NeutronException): """Network Profile cannot be created since it already exists.""" message = _("Network Profile %(profile_id)s " "already exists.") class NetworkProfileNotFound(exceptions.NotFound): """Network Profile with the given UUID/name cannot be found.""" message = _("Network Profile %(profile)s could not be found.") class NoMoreNetworkSegments(exceptions.NoNetworkAvailable): """Network segments exhausted for the given network profile.""" message = _("No more segments available in network segment pool " "%(network_profile_name)s.") class VMNetworkNotFound(exceptions.NotFound): """VM Network with the given name cannot be found.""" message = _("VM Network %(name)s could not be found.") class VxlanIdInUse(exceptions.InUse): """VXLAN ID is in use.""" message = _("Unable to create the network. 
" "The VXLAN ID %(vxlan_id)s is in use.") class VSMConnectionFailed(exceptions.ServiceUnavailable): """Connection to VSM failed.""" message = _("Connection to VSM failed: %(reason)s.") class VSMError(exceptions.NeutronException): """Error has occurred on the VSM.""" message = _("Internal VSM Error: %(reason)s.") class NetworkBindingNotFound(exceptions.NotFound): """Network Binding for network cannot be found.""" message = _("Network Binding for network %(network_id)s could " "not be found.") class PortBindingNotFound(exceptions.NotFound): """Port Binding for port cannot be found.""" message = _("Port Binding for port %(port_id)s could " "not be found.") class ProfileTenantBindingNotFound(exceptions.NotFound): """Profile to Tenant binding for given profile ID cannot be found.""" message = _("Profile-Tenant binding for profile %(profile_id)s could " "not be found.") class NoClusterFound(exceptions.NotFound): """No service cluster found to perform multi-segment bridging.""" message = _("No service cluster found to perform multi-segment bridging.")
apache-2.0
rooty/minishopgae
werkzeug/testsuite/test.py
66
14718
# -*- coding: utf-8 -*- """ werkzeug.testsuite.test ~~~~~~~~~~~~~~~~~~~~~~~ Tests the testing tools. :copyright: (c) 2011 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ from __future__ import with_statement import sys import unittest from cStringIO import StringIO, OutputType from werkzeug.testsuite import WerkzeugTestCase from werkzeug.wrappers import Request, Response, BaseResponse from werkzeug.test import Client, EnvironBuilder, create_environ, \ ClientRedirectError, stream_encode_multipart, run_wsgi_app from werkzeug.utils import redirect from werkzeug.formparser import parse_form_data from werkzeug.datastructures import MultiDict def cookie_app(environ, start_response): """A WSGI application which sets a cookie, and returns as a ersponse any cookie which exists. """ response = Response(environ.get('HTTP_COOKIE', 'No Cookie'), mimetype='text/plain') response.set_cookie('test', 'test') return response(environ, start_response) def redirect_loop_app(environ, start_response): response = redirect('http://localhost/some/redirect/') return response(environ, start_response) def redirect_with_get_app(environ, start_response): req = Request(environ) if req.url not in ('http://localhost/', 'http://localhost/first/request', 'http://localhost/some/redirect/'): assert False, 'redirect_demo_app() did not expect URL "%s"' % req.url if '/some/redirect' not in req.url: response = redirect('http://localhost/some/redirect/') else: response = Response('current url: %s' % req.url) return response(environ, start_response) def redirect_with_post_app(environ, start_response): req = Request(environ) if req.url == 'http://localhost/some/redirect/': assert req.method == 'GET', 'request should be GET' assert not req.form, 'request should not have data' response = Response('current url: %s' % req.url) else: response = redirect('http://localhost/some/redirect/') return response(environ, start_response) def external_redirect_demo_app(environ, start_response): response = 
redirect('http://example.com/') return response(environ, start_response) def external_subdomain_redirect_demo_app(environ, start_response): if 'test.example.com' in environ['HTTP_HOST']: response = Response('redirected successfully to subdomain') else: response = redirect('http://test.example.com/login') return response(environ, start_response) def multi_value_post_app(environ, start_response): req = Request(environ) assert req.form['field'] == 'val1', req.form['field'] assert req.form.getlist('field') == ['val1', 'val2'], req.form.getlist('field') response = Response('ok') return response(environ, start_response) class TestTestCase(WerkzeugTestCase): def test_cookie_forging(self): c = Client(cookie_app) c.set_cookie('localhost', 'foo', 'bar') appiter, code, headers = c.open() assert list(appiter) == ['foo=bar'] def test_set_cookie_app(self): c = Client(cookie_app) appiter, code, headers = c.open() assert 'Set-Cookie' in dict(headers) def test_cookiejar_stores_cookie(self): c = Client(cookie_app) appiter, code, headers = c.open() assert 'test' in c.cookie_jar._cookies['localhost.local']['/'] def test_no_initial_cookie(self): c = Client(cookie_app) appiter, code, headers = c.open() assert ''.join(appiter) == 'No Cookie' def test_resent_cookie(self): c = Client(cookie_app) c.open() appiter, code, headers = c.open() assert ''.join(appiter) == 'test=test' def test_disable_cookies(self): c = Client(cookie_app, use_cookies=False) c.open() appiter, code, headers = c.open() assert ''.join(appiter) == 'No Cookie' def test_cookie_for_different_path(self): c = Client(cookie_app) c.open('/path1') appiter, code, headers = c.open('/path2') assert ''.join(appiter) == 'test=test' def test_environ_builder_basics(self): b = EnvironBuilder() assert b.content_type is None b.method = 'POST' assert b.content_type == 'application/x-www-form-urlencoded' b.files.add_file('test', StringIO('test contents'), 'test.txt') assert b.files['test'].content_type == 'text/plain' assert b.content_type 
== 'multipart/form-data' b.form['test'] = 'normal value' req = b.get_request() b.close() assert req.url == 'http://localhost/' assert req.method == 'POST' assert req.form['test'] == 'normal value' assert req.files['test'].content_type == 'text/plain' assert req.files['test'].filename == 'test.txt' assert req.files['test'].read() == 'test contents' def test_environ_builder_headers(self): b = EnvironBuilder(environ_base={'HTTP_USER_AGENT': 'Foo/0.1'}, environ_overrides={'wsgi.version': (1, 1)}) b.headers['X-Suck-My-Dick'] = 'very well sir' env = b.get_environ() assert env['HTTP_USER_AGENT'] == 'Foo/0.1' assert env['HTTP_X_SUCK_MY_DICK'] == 'very well sir' assert env['wsgi.version'] == (1, 1) b.headers['User-Agent'] = 'Bar/1.0' env = b.get_environ() assert env['HTTP_USER_AGENT'] == 'Bar/1.0' def test_environ_builder_paths(self): b = EnvironBuilder(path='/foo', base_url='http://example.com/') assert b.base_url == 'http://example.com/' assert b.path == '/foo' assert b.script_root == '' assert b.host == 'example.com' b = EnvironBuilder(path='/foo', base_url='http://example.com/bar') assert b.base_url == 'http://example.com/bar/' assert b.path == '/foo' assert b.script_root == '/bar' assert b.host == 'example.com' b.host = 'localhost' assert b.base_url == 'http://localhost/bar/' b.base_url = 'http://localhost:8080/' assert b.host == 'localhost:8080' assert b.server_name == 'localhost' assert b.server_port == 8080 b.host = 'foo.invalid' b.url_scheme = 'https' b.script_root = '/test' env = b.get_environ() assert env['SERVER_NAME'] == 'foo.invalid' assert env['SERVER_PORT'] == '443' assert env['SCRIPT_NAME'] == '/test' assert env['PATH_INFO'] == '/foo' assert env['HTTP_HOST'] == 'foo.invalid' assert env['wsgi.url_scheme'] == 'https' assert b.base_url == 'https://foo.invalid/test/' def test_environ_builder_content_type(self): builder = EnvironBuilder() assert builder.content_type is None builder.method = 'POST' assert builder.content_type == 
'application/x-www-form-urlencoded' builder.form['foo'] = 'bar' assert builder.content_type == 'application/x-www-form-urlencoded' builder.files.add_file('blafasel', StringIO('foo'), 'test.txt') assert builder.content_type == 'multipart/form-data' req = builder.get_request() assert req.form['foo'] == 'bar' assert req.files['blafasel'].read() == 'foo' def test_environ_builder_stream_switch(self): d = MultiDict(dict(foo=u'bar', blub=u'blah', hu=u'hum')) for use_tempfile in False, True: stream, length, boundary = stream_encode_multipart( d, use_tempfile, threshold=150) assert isinstance(stream, OutputType) != use_tempfile form = parse_form_data({'wsgi.input': stream, 'CONTENT_LENGTH': str(length), 'CONTENT_TYPE': 'multipart/form-data; boundary="%s"' % boundary})[1] assert form == d def test_create_environ(self): env = create_environ('/foo?bar=baz', 'http://example.org/') expected = { 'wsgi.multiprocess': False, 'wsgi.version': (1, 0), 'wsgi.run_once': False, 'wsgi.errors': sys.stderr, 'wsgi.multithread': False, 'wsgi.url_scheme': 'http', 'SCRIPT_NAME': '', 'CONTENT_TYPE': '', 'CONTENT_LENGTH': '0', 'SERVER_NAME': 'example.org', 'REQUEST_METHOD': 'GET', 'HTTP_HOST': 'example.org', 'PATH_INFO': '/foo', 'SERVER_PORT': '80', 'SERVER_PROTOCOL': 'HTTP/1.1', 'QUERY_STRING': 'bar=baz' } for key, value in expected.iteritems(): assert env[key] == value assert env['wsgi.input'].read(0) == '' assert create_environ('/foo', 'http://example.com/')['SCRIPT_NAME'] == '' def test_file_closing(self): closed = [] class SpecialInput(object): def read(self): return '' def close(self): closed.append(self) env = create_environ(data={'foo': SpecialInput()}) assert len(closed) == 1 builder = EnvironBuilder() builder.files.add_file('blah', SpecialInput()) builder.close() assert len(closed) == 2 def test_follow_redirect(self): env = create_environ('/', base_url='http://localhost') c = Client(redirect_with_get_app) appiter, code, headers = c.open(environ_overrides=env, follow_redirects=True) 
assert code == '200 OK' assert ''.join(appiter) == 'current url: http://localhost/some/redirect/' # Test that the :cls:`Client` is aware of user defined response wrappers c = Client(redirect_with_get_app, response_wrapper=BaseResponse) resp = c.get('/', follow_redirects=True) assert resp.status_code == 200 assert resp.data == 'current url: http://localhost/some/redirect/' # test with URL other than '/' to make sure redirected URL's are correct c = Client(redirect_with_get_app, response_wrapper=BaseResponse) resp = c.get('/first/request', follow_redirects=True) assert resp.status_code == 200 assert resp.data == 'current url: http://localhost/some/redirect/' def test_follow_external_redirect(self): env = create_environ('/', base_url='http://localhost') c = Client(external_redirect_demo_app) self.assert_raises(RuntimeError, lambda: c.get(environ_overrides=env, follow_redirects=True)) def test_follow_external_redirect_on_same_subdomain(self): env = create_environ('/', base_url='http://example.com') c = Client(external_subdomain_redirect_demo_app, allow_subdomain_redirects=True) c.get(environ_overrides=env, follow_redirects=True) # check that this does not work for real external domains env = create_environ('/', base_url='http://localhost') self.assert_raises(RuntimeError, lambda: c.get(environ_overrides=env, follow_redirects=True)) # check that subdomain redirects fail if no `allow_subdomain_redirects` is applied c = Client(external_subdomain_redirect_demo_app) self.assert_raises(RuntimeError, lambda: c.get(environ_overrides=env, follow_redirects=True)) def test_follow_redirect_loop(self): c = Client(redirect_loop_app, response_wrapper=BaseResponse) with self.assert_raises(ClientRedirectError): resp = c.get('/', follow_redirects=True) def test_follow_redirect_with_post(self): c = Client(redirect_with_post_app, response_wrapper=BaseResponse) resp = c.post('/', follow_redirects=True, data='foo=blub+hehe&blah=42') assert resp.status_code == 200 assert resp.data == 
'current url: http://localhost/some/redirect/' def test_path_info_script_name_unquoting(self): def test_app(environ, start_response): start_response('200 OK', [('Content-Type', 'text/plain')]) return [environ['PATH_INFO'] + '\n' + environ['SCRIPT_NAME']] c = Client(test_app, response_wrapper=BaseResponse) resp = c.get('/foo%40bar') assert resp.data == '/foo@bar\n' c = Client(test_app, response_wrapper=BaseResponse) resp = c.get('/foo%40bar', 'http://localhost/bar%40baz') assert resp.data == '/foo@bar\n/bar@baz' def test_multi_value_submit(self): c = Client(multi_value_post_app, response_wrapper=BaseResponse) data = { 'field': ['val1','val2'] } resp = c.post('/', data=data) assert resp.status_code == 200 c = Client(multi_value_post_app, response_wrapper=BaseResponse) data = MultiDict({ 'field': ['val1','val2'] }) resp = c.post('/', data=data) assert resp.status_code == 200 def test_iri_support(self): b = EnvironBuilder(u'/föö-bar', base_url=u'http://☃.net/') assert b.path == '/f%C3%B6%C3%B6-bar' assert b.base_url == 'http://xn--n3h.net/' def test_run_wsgi_apps(self): def simple_app(environ, start_response): start_response('200 OK', [('Content-Type', 'text/html')]) return ['Hello World!'] app_iter, status, headers = run_wsgi_app(simple_app, {}) assert status == '200 OK' assert headers == [('Content-Type', 'text/html')] assert app_iter == ['Hello World!'] def yielding_app(environ, start_response): start_response('200 OK', [('Content-Type', 'text/html')]) yield 'Hello ' yield 'World!' 
app_iter, status, headers = run_wsgi_app(yielding_app, {}) assert status == '200 OK' assert headers == [('Content-Type', 'text/html')] assert list(app_iter) == ['Hello ', 'World!'] def test_multiple_cookies(self): @Request.application def test_app(request): response = Response(repr(sorted(request.cookies.items()))) response.set_cookie('test1', 'foo') response.set_cookie('test2', 'bar') return response client = Client(test_app, Response) resp = client.get('/') assert resp.data == '[]' resp = client.get('/') assert resp.data == "[('test1', u'foo'), ('test2', u'bar')]" def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(TestTestCase)) return suite
gpl-3.0
xpostudio4/red-de-emprendimiento
app/institutions/views.py
1
4872
"""Views for the institutions app.

Every mutating endpoint answers either with a redirect (the classic
form views) or with a small JSON payload consumed by the dashboard's
AJAX front-end.
"""
from django.contrib.auth import (login as django_login,
                                 authenticate,
                                 logout as django_logout)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import SetPasswordForm
from django.http import JsonResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.views.decorators.http import require_POST

from .models import Event, Organization, UserProfile
from .forms import (CustomUserCreationForm,
                    DashboardUserCreationForm,
                    EventForm,
                    OrganizationForm,
                    UserProfileLoginForm)


@login_required
def approve_organization(request, organization_id):
    """Answer an AJAX request asking to approve an organization.

    Only administrator users may approve; everyone else receives a JSON
    refusal carrying a (Spanish) reason string.
    """
    if request.user.is_admin:
        organization = Organization.objects.get(id=organization_id)
        organization.is_active = True
        organization.save()
        return JsonResponse({'is_approved': True})
    return JsonResponse({'is_approved': False,
                         'reason': 'Solo los administradores pueden aprobar'})


@require_POST
@login_required
def create_event(request):
    """Create a new event owned by the requesting user's organization.

    Always redirects back to the dashboard; an invalid form is dropped
    without feedback.  NOTE(review): confirm the silent-drop UX is
    intentional before tightening it.
    """
    form = EventForm(request.POST or None)
    if form.is_valid():
        event = form.save(commit=False)
        # The organization comes from the session user, never from the
        # submitted data, so users cannot create events for other orgs.
        event.organization = request.user.organization
        event.save()
        # Required after save(commit=False) to persist many-to-many data.
        form.save_m2m()
    return HttpResponseRedirect('/dashboard/')


@require_POST
@login_required
def dashboard_usercreation(request):
    """Create an additional user account for the requester's organization.

    The new user inherits the organization of the user submitting the
    form.  Always redirects back to the dashboard.
    """
    user_form = DashboardUserCreationForm(request.POST or None)
    if user_form.is_valid():
        new_user = user_form.save(commit=False)
        new_user.organization = request.user.organization
        new_user.save()
    return HttpResponseRedirect('/dashboard/')


@require_POST
@login_required
def dashboard_userdeletion(request, user_id):
    """Delete a user account belonging to the requester's organization.

    Returns JSON: ``{'is_deleted': bool}``.  Deletion is refused when the
    target user belongs to a different organization.
    """
    user_to_delete = UserProfile.objects.get(pk=user_id)
    if user_to_delete.organization == request.user.organization:
        user_to_delete.delete()
        return JsonResponse({'is_deleted': True})
    return JsonResponse({'is_deleted': False})


@require_POST
@login_required
def delete_event(request, event_id):
    """Delete an event after a POST request; returns JSON.

    Only the event's owning organization may delete it.
    """
    event = get_object_or_404(Event, id=event_id)
    if request.user.organization == event.organization:
        event.delete()
        return JsonResponse({"is_deleted": True})
    return JsonResponse({"is_deleted": False})


@require_POST
@login_required
def password_change(request):
    """Process a password change for the logged-in user; returns JSON."""
    password_form = SetPasswordForm(request.user, request.POST or None)
    if password_form.is_valid():
        password_form.save()
        return JsonResponse({'is_changed': True})
    # Report validation problems back to the AJAX caller.  (A stray debug
    # ``print`` of the form errors was removed here.)
    return JsonResponse({'is_changed': False,
                         'reasons': str(password_form.errors)})


@require_POST
def signin(request):
    """Log a user in via AJAX; returns JSON with the outcome.

    The redundant ``request.method == 'POST'`` check was dropped: the
    ``@require_POST`` decorator already guarantees the method.
    """
    form = UserProfileLoginForm(data=request.POST or None)
    if form.is_valid():
        user = authenticate(email=request.POST['username'],
                            password=request.POST['password'])
        if user is not None and user.is_active:
            django_login(request, user)
            return JsonResponse({'is_loggedin': True})
    return JsonResponse({'is_loggedin': False,
                         'reason': "La contrase&ntilde;a es incorrecta"})


def signup(request):
    """Register a new user together with its organization.

    Authenticated visitors are bounced to the front page.  On a valid
    POST both the organization and its first user are created; otherwise
    the combined signup form is (re)rendered.
    """
    if request.user.is_authenticated():
        # Already logged in -- nothing to register.
        return HttpResponseRedirect('/')
    user_form = CustomUserCreationForm(request.POST or None)
    organization_form = OrganizationForm(request.POST or None)
    if request.method == 'POST':
        if user_form.is_valid() and organization_form.is_valid():
            organization = organization_form.save()
            user = user_form.save(commit=False)
            # Self-registered accounts never start as administrators.
            user.is_admin = False
            user.organization = organization
            user.save()
            return HttpResponseRedirect('/')
    # GET, or invalid POST: (re)render the combined signup form.
    return render(request,
                  'accounts/signup.html',
                  {'user_form': user_form,
                   'organization_form': organization_form},
                  )
mit
yoer/hue
desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/Protocol/Chaffing.py
127
10245
# # Chaffing.py : chaffing & winnowing support # # Part of the Python Cryptography Toolkit # # Written by Andrew M. Kuchling, Barry A. Warsaw, and others # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== # """This file implements the chaffing algorithm. Winnowing and chaffing is a technique for enhancing privacy without requiring strong encryption. In short, the technique takes a set of authenticated message blocks (the wheat) and adds a number of chaff blocks which have randomly chosen data and MAC fields. This means that to an adversary, the chaff blocks look as valid as the wheat blocks, and so the authentication would have to be performed on every block. By tailoring the number of chaff blocks added to the message, the sender can make breaking the message computationally infeasible. There are many other interesting properties of the winnow/chaff technique. For example, say Alice is sending a message to Bob. She packetizes the message and performs an all-or-nothing transformation on the packets. 
Then she authenticates each packet with a message authentication code (MAC). The MAC is a hash of the data packet, and there is a secret key which she must share with Bob (key distribution is an exercise left to the reader). She then adds a serial number to each packet, and sends the packets to Bob. Bob receives the packets, and using the shared secret authentication key, authenticates the MACs for each packet. Those packets that have bad MACs are simply discarded. The remainder are sorted by serial number, and passed through the reverse all-or-nothing transform. The transform means that an eavesdropper (say Eve) must acquire all the packets before any of the data can be read. If even one packet is missing, the data is useless. There's one twist: by adding chaff packets, Alice and Bob can make Eve's job much harder, since Eve now has to break the shared secret key, or try every combination of wheat and chaff packet to read any of the message. The cool thing is that Bob doesn't need to add any additional code; the chaff packets are already filtered out because their MACs don't match (in all likelihood -- since the data and MACs for the chaff packets are randomly chosen it is possible, but very unlikely that a chaff MAC will match the chaff data). And Alice need not even be the party adding the chaff! She could be completely unaware that a third party, say Charles, is adding chaff packets to her messages as they are transmitted. For more information on winnowing and chaffing see this paper: Ronald L. Rivest, "Chaffing and Winnowing: Confidentiality without Encryption" http://theory.lcs.mit.edu/~rivest/chaffing.txt """ __revision__ = "$Id$" from Crypto.Util.number import bytes_to_long class Chaff: """Class implementing the chaff adding algorithm. Methods for subclasses: _randnum(size): Returns a randomly generated number with a byte-length equal to size. Subclasses can use this to implement better random data and MAC generating algorithms. 
The default algorithm is probably not very cryptographically secure. It is most important that the chaff data does not contain any patterns that can be used to discern it from wheat data without running the MAC. """ def __init__(self, factor=1.0, blocksper=1): """Chaff(factor:float, blocksper:int) factor is the number of message blocks to add chaff to, expressed as a percentage between 0.0 and 1.0. blocksper is the number of chaff blocks to include for each block being chaffed. Thus the defaults add one chaff block to every message block. By changing the defaults, you can adjust how computationally difficult it could be for an adversary to brute-force crack the message. The difficulty is expressed as: pow(blocksper, int(factor * number-of-blocks)) For ease of implementation, when factor < 1.0, only the first int(factor*number-of-blocks) message blocks are chaffed. """ if not (0.0<=factor<=1.0): raise ValueError, "'factor' must be between 0.0 and 1.0" if blocksper < 0: raise ValueError, "'blocksper' must be zero or more" self.__factor = factor self.__blocksper = blocksper def chaff(self, blocks): """chaff( [(serial-number:int, data:string, MAC:string)] ) : [(int, string, string)] Add chaff to message blocks. blocks is a list of 3-tuples of the form (serial-number, data, MAC). Chaff is created by choosing a random number of the same byte-length as data, and another random number of the same byte-length as MAC. The message block's serial number is placed on the chaff block and all the packet's chaff blocks are randomly interspersed with the single wheat block. This method then returns a list of 3-tuples of the same form. Chaffed blocks will contain multiple instances of 3-tuples with the same serial number, but the only way to figure out which blocks are wheat and which are chaff is to perform the MAC hash and compare values. """ chaffedblocks = [] # count is the number of blocks to add chaff to. 
blocksper is the # number of chaff blocks to add per message block that is being # chaffed. count = len(blocks) * self.__factor blocksper = range(self.__blocksper) for i, wheat in zip(range(len(blocks)), blocks): # it shouldn't matter which of the n blocks we add chaff to, so for # ease of implementation, we'll just add them to the first count # blocks if i < count: serial, data, mac = wheat datasize = len(data) macsize = len(mac) addwheat = 1 # add chaff to this block for j in blocksper: import sys chaffdata = self._randnum(datasize) chaffmac = self._randnum(macsize) chaff = (serial, chaffdata, chaffmac) # mix up the order, if the 5th bit is on then put the # wheat on the list if addwheat and bytes_to_long(self._randnum(16)) & 0x40: chaffedblocks.append(wheat) addwheat = 0 chaffedblocks.append(chaff) if addwheat: chaffedblocks.append(wheat) else: # just add the wheat chaffedblocks.append(wheat) return chaffedblocks def _randnum(self, size): from Crypto import Random return Random.new().read(size) if __name__ == '__main__': text = """\ We hold these truths to be self-evident, that all men are created equal, that they are endowed by their Creator with certain unalienable Rights, that among these are Life, Liberty, and the pursuit of Happiness. That to secure these rights, Governments are instituted among Men, deriving their just powers from the consent of the governed. That whenever any Form of Government becomes destructive of these ends, it is the Right of the People to alter or to abolish it, and to institute new Government, laying its foundation on such principles and organizing its powers in such form, as to them shall seem most likely to effect their Safety and Happiness. """ print 'Original text:\n==========' print text print '==========' # first transform the text into packets blocks = [] ; size = 40 for i in range(0, len(text), size): blocks.append( text[i:i+size] ) # now get MACs for all the text blocks. The key is obvious... print 'Calculating MACs...' 
from Crypto.Hash import HMAC, SHA key = 'Jefferson' macs = [HMAC.new(key, block, digestmod=SHA).digest() for block in blocks] assert len(blocks) == len(macs) # put these into a form acceptable as input to the chaffing procedure source = [] m = zip(range(len(blocks)), blocks, macs) print m for i, data, mac in m: source.append((i, data, mac)) # now chaff these print 'Adding chaff...' c = Chaff(factor=0.5, blocksper=2) chaffed = c.chaff(source) from base64 import encodestring # print the chaffed message blocks. meanwhile, separate the wheat from # the chaff wheat = [] print 'chaffed message blocks:' for i, data, mac in chaffed: # do the authentication h = HMAC.new(key, data, digestmod=SHA) pmac = h.digest() if pmac == mac: tag = '-->' wheat.append(data) else: tag = ' ' # base64 adds a trailing newline print tag, '%3d' % i, \ repr(data), encodestring(mac)[:-1] # now decode the message packets and check it against the original text print 'Undigesting wheat...' # PY3K: This is meant to be text, do not change to bytes (data) newtext = "".join(wheat) if newtext == text: print 'They match!' else: print 'They differ!'
apache-2.0
Leoniela/nipype
nipype/interfaces/fsl/tests/test_auto_Eddy.py
9
1850
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.fsl.epi import Eddy def test_Eddy_inputs(): input_map = dict(args=dict(argstr='%s', ), environ=dict(nohash=True, usedefault=True, ), flm=dict(argstr='--flm=%s', ), fwhm=dict(argstr='--fwhm=%s', ), ignore_exception=dict(nohash=True, usedefault=True, ), in_acqp=dict(argstr='--acqp=%s', mandatory=True, ), in_bval=dict(argstr='--bvals=%s', mandatory=True, ), in_bvec=dict(argstr='--bvecs=%s', mandatory=True, ), in_file=dict(argstr='--imain=%s', mandatory=True, ), in_index=dict(argstr='--index=%s', mandatory=True, ), in_mask=dict(argstr='--mask=%s', mandatory=True, ), in_topup_fieldcoef=dict(argstr='--topup=%s', requires=['in_topup_movpar'], ), in_topup_movpar=dict(requires=['in_topup_fieldcoef'], ), method=dict(argstr='--resamp=%s', ), niter=dict(argstr='--niter=%s', ), num_threads=dict(nohash=True, usedefault=True, ), out_base=dict(argstr='--out=%s', usedefault=True, ), output_type=dict(), repol=dict(argstr='--repol', ), session=dict(argstr='--session=%s', ), terminal_output=dict(nohash=True, ), ) inputs = Eddy.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_Eddy_outputs(): output_map = dict(out_corrected=dict(), out_parameter=dict(), ) outputs = Eddy.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value
bsd-3-clause
TangHao1987/intellij-community
python/lib/Lib/site-packages/django/contrib/localflavor/no/forms.py
309
2761
"""
Norwegian-specific Form helpers
"""
import re, datetime
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _

class NOZipCodeField(RegexField):
    # Norwegian postal codes are exactly four digits.
    default_error_messages = {
        'invalid': _('Enter a zip code in the format XXXX.'),
    }

    def __init__(self, *args, **kwargs):
        # The regex alone constrains the value, so no explicit length
        # limits are passed to RegexField.
        super(NOZipCodeField, self).__init__(r'^\d{4}$',
            max_length=None, min_length=None, *args, **kwargs)

class NOMunicipalitySelect(Select):
    """
    A Select widget that uses a list of Norwegian municipalities
    (fylker) as its choices.
    """
    def __init__(self, attrs=None):
        # Imported lazily so the choices list is only loaded when the
        # widget is actually instantiated.
        from no_municipalities import MUNICIPALITY_CHOICES
        super(NOMunicipalitySelect, self).__init__(attrs, choices=MUNICIPALITY_CHOICES)

class NOSocialSecurityNumber(Field):
    """
    Algorithm is documented at http://no.wikipedia.org/wiki/Personnummer

    On success, ``clean`` also sets ``self.birthday`` (a datetime.date or
    None) and ``self.gender`` ('F' or 'M') as side effects.
    """
    default_error_messages = {
        'invalid': _(u'Enter a valid Norwegian social security number.'),
    }

    def clean(self, value):
        # Base Field validation first (required/empty handling).
        super(NOSocialSecurityNumber, self).clean(value)
        if value in EMPTY_VALUES:
            return u''

        # The number must be exactly 11 digits: DDMMYY (birth date),
        # a 3-digit individual number, then two check digits.
        if not re.match(r'^\d{11}$', value):
            raise ValidationError(self.error_messages['invalid'])

        day = int(value[:2])
        month = int(value[2:4])
        year2 = int(value[4:6])   # two-digit birth year
        inum = int(value[6:9])    # individual number; encodes the century

        self.birthday = None
        # The century is derived from the individual-number range combined
        # with the two-digit year (per the algorithm linked in the class
        # docstring).  These are deliberately plain `if`s, not `elif`s:
        # the ranges overlap and a later match overwrites an earlier one,
        # so the statement order is significant -- do not reorder.
        # (`000` here is just the integer zero.)
        try:
            if 000 <= inum < 500:
                self.birthday = datetime.date(1900+year2, month, day)
            if 500 <= inum < 750 and year2 > 54:
                self.birthday = datetime.date(1800+year2, month, day)
            if 500 <= inum < 1000 and year2 < 40:
                self.birthday = datetime.date(2000+year2, month, day)
            if 900 <= inum < 1000 and year2 > 39:
                self.birthday = datetime.date(1900+year2, month, day)
        except ValueError:
            # Impossible calendar date (e.g. February 30th).
            raise ValidationError(self.error_messages['invalid'])

        # Ninth digit encodes gender: even = female, odd = male.
        sexnum = int(value[8])
        if sexnum % 2 == 0:
            self.gender = 'F'
        else:
            self.gender = 'M'

        digits = map(int, list(value))
        weight_1 = [3, 7, 6, 1, 8, 9, 4, 5, 2, 1, 0]
        weight_2 = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2, 1]

        def multiply_reduce(aval, bval):
            # Weighted sum (dot product) of two equal-length sequences.
            return sum([(a * b) for (a, b) in zip(aval, bval)])

        # Both weighted checksums must be divisible by 11 (mod-11 check
        # digits).  NOTE(review): `digits` is consumed twice, which relies
        # on Python 2's `map` returning a list; under Python 3 the second
        # call would see an exhausted iterator -- confirm before porting.
        if multiply_reduce(digits, weight_1) % 11 != 0:
            raise ValidationError(self.error_messages['invalid'])
        if multiply_reduce(digits, weight_2) % 11 != 0:
            raise ValidationError(self.error_messages['invalid'])

        return value
apache-2.0
todaychi/hue
desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/commands/runscript.py
35
6775
# Management command: run a named script inside the Django context.
# Scripts are plain modules exposing a ``run()`` callable, looked up in
# each installed app's ``scripts`` (and optionally ``fixtures``) subpackage.
import sys
import traceback
from optparse import make_option

from django_extensions.management.email_notifications import EmailNotificationCommand
from django.conf import settings

try:
    import importlib
except ImportError:
    # importlib shipped with Python 2.7; older interpreters need backport.
    print("Runscript needs the importlib module to work. You can install it via 'pip install importlib'")
    sys.exit(1)

from django_extensions.management.utils import signalcommand


def vararg_callback(option, opt_str, opt_value, parser):
    """optparse callback that greedily consumes the remaining positional
    args (up to the next option flag) into a list for ``option.dest``."""
    parser.rargs.insert(0, opt_value)
    value = []
    for arg in parser.rargs:
        # stop on --foo like options
        if arg[:2] == "--" and len(arg) > 2:
            break
        # stop on -a like options
        if arg[:1] == "-":
            break
        value.append(arg)
    # Remove the consumed args so optparse does not see them again.
    del parser.rargs[:len(value)]
    setattr(parser.values, option.dest, value)


class Command(EmailNotificationCommand):
    # Extra options on top of the email-notification base command.
    option_list = EmailNotificationCommand.option_list + (
        make_option('--fixtures', action='store_true', dest='infixtures', default=False,
                    help='Only look in app.fixtures subdir'),
        make_option('--noscripts', action='store_true', dest='noscripts', default=False,
                    help='Look in app.scripts subdir'),
        make_option('-s', '--silent', action='store_true', dest='silent', default=False,
                    help='Run silently, do not show errors and tracebacks'),
        make_option('--no-traceback', action='store_true', dest='no_traceback', default=False,
                    help='Do not show tracebacks'),
        make_option('--script-args', action='callback', callback=vararg_callback, type='string',
                    help='Space-separated argument list to be passed to the scripts. Note that the '
                         'same arguments will be passed to all named scripts.'),
    )
    help = 'Runs a script in django context.'
    args = "script [script ...]"

    @signalcommand
    def handle(self, *scripts, **options):
        """Locate each named script in the configured subdirs and execute
        its ``run()``, honoring verbosity/silence/email-notification flags."""
        # Style shortcuts for colored console output.
        NOTICE = self.style.SQL_TABLE
        NOTICE2 = self.style.SQL_FIELD
        ERROR = self.style.ERROR
        ERROR2 = self.style.NOTICE

        # Subdirectories (subpackages) searched inside each installed app.
        subdirs = []

        if not options.get('noscripts'):
            subdirs.append('scripts')

        if options.get('infixtures'):
            subdirs.append('fixtures')

        verbosity = int(options.get('verbosity', 1))
        show_traceback = options.get('traceback', True)
        if show_traceback is None:
            # XXX: traceback is set to None from Django ?
            show_traceback = True
        no_traceback = options.get('no_traceback', False)
        if no_traceback:
            show_traceback = False
        silent = options.get('silent', False)
        if silent:
            # Silent mode suppresses all output, not just errors.
            verbosity = 0
        email_notifications = options.get('email_notifications', False)

        if len(subdirs) < 1:
            print(NOTICE("No subdirs to run left."))
            return

        if len(scripts) < 1:
            print(ERROR("Script name required."))
            return

        def run_script(mod, *script_args):
            # Execute mod.run(); on failure optionally email/notify and
            # re-raise so the caller sees the traceback (unless silenced).
            try:
                mod.run(*script_args)
                if email_notifications:
                    self.send_email_notification(notification_id=mod.__name__)
            except Exception:
                if silent:
                    return
                if verbosity > 0:
                    print(ERROR("Exception while running run() in '%s'" % mod.__name__))
                if email_notifications:
                    self.send_email_notification(
                        notification_id=mod.__name__, include_traceback=True)
                if show_traceback:
                    raise

        def my_import(mod):
            # Try to import ``mod`` and return the module object if it has
            # a ``run`` attribute; return False/None otherwise.  The nested
            # exception handling distinguishes "candidate module simply
            # doesn't exist" (traceback one frame deep -> quiet False) from
            # "module exists but fails to import" (reported to the user).
            # The statement order here is load-bearing -- do not reorder.
            if verbosity > 1:
                print(NOTICE("Check for %s" % mod))
            # check if module exists before importing
            try:
                importlib.import_module(mod)
                t = __import__(mod, [], [], [" "])
            except (ImportError, AttributeError) as e:
                if str(e).startswith('No module named'):
                    try:
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        try:
                            # Traceback only one frame deep: the import
                            # failed at our own __import__ call, i.e. the
                            # module itself is missing -- not an error.
                            if exc_traceback.tb_next.tb_next is None:
                                return False
                        except AttributeError:
                            pass
                    finally:
                        # Break the traceback reference cycle.
                        exc_traceback = None
                if verbosity > 1:
                    if verbosity > 2:
                        traceback.print_exc()
                    print(ERROR("Cannot import module '%s': %s." % (mod, e)))
                return False

            #if verbosity > 1:
            #    print(NOTICE("Found script %s ..." % mod))
            if hasattr(t, "run"):
                if verbosity > 1:
                    print(NOTICE2("Found script '%s' ..." % mod))
                #if verbosity > 1:
                #    print(NOTICE("found run() in %s. executing..." % mod))
                return t
            else:
                if verbosity > 1:
                    print(ERROR2("Find script '%s' but no run() function found." % mod))

        def find_modules_for_script(script):
            """ find script module which contains 'run' attribute """
            modules = []
            # first look in apps
            for app in settings.INSTALLED_APPS:
                for subdir in subdirs:
                    mod = my_import("%s.%s.%s" % (app, subdir, script))
                    if mod:
                        modules.append(mod)

            # try app.DIR.script import
            sa = script.split(".")
            for subdir in subdirs:
                nn = ".".join(sa[:-1] + [subdir, sa[-1]])
                mod = my_import(nn)
                if mod:
                    modules.append(mod)

            # try direct import
            if script.find(".") != -1:
                mod = my_import(script)
                if mod:
                    modules.append(mod)

            return modules

        if options.get('script_args'):
            script_args = options['script_args']
        else:
            script_args = []

        # Run every resolved module for every named script; the same
        # script_args are passed to each.
        for script in scripts:
            modules = find_modules_for_script(script)
            if not modules:
                if verbosity > 0 and not silent:
                    print(ERROR("No (valid) module for script '%s' found" % script))
                    if verbosity < 2:
                        print(ERROR("Try running with a higher verbosity level like: -v2 or -v3"))
            for mod in modules:
                if verbosity > 1:
                    print(NOTICE2("Running script '%s' ..." % mod.__name__))
                run_script(mod, *script_args)
apache-2.0
rismalrv/edx-platform
common/djangoapps/third_party_auth/tests/specs/base.py
36
38777
"""Base integration test for provider implementations.""" import unittest import json import mock from django import test from django.contrib import auth from django.contrib.auth import models as auth_models from django.contrib.messages.storage import fallback from django.contrib.sessions.backends import cache from django.test import utils as django_utils from django.conf import settings as django_settings from edxmako.tests import mako_middleware_process_request from social import actions, exceptions from social.apps.django_app import utils as social_utils from social.apps.django_app import views as social_views from student import models as student_models from student import views as student_views from student_account.views import account_settings_context from third_party_auth import middleware, pipeline from third_party_auth import settings as auth_settings from third_party_auth.tests import testutil @unittest.skipUnless( testutil.AUTH_FEATURES_KEY in django_settings.FEATURES, testutil.AUTH_FEATURES_KEY + ' not in settings.FEATURES') @django_utils.override_settings() # For settings reversion on a method-by-method basis. class IntegrationTest(testutil.TestCase, test.TestCase): """Abstract base class for provider integration tests.""" # Override setUp and set this: provider = None # Methods you must override in your children. def get_response_data(self): """Gets a dict of response data of the form given by the provider. To determine what the provider returns, drop into a debugger in your provider's do_auth implementation. Providers may merge different kinds of data (for example, data about the user and data about the user's credentials). """ raise NotImplementedError def get_username(self): """Gets username based on response data from a provider. Each provider has different logic for username generation. Sadly, this is not extracted into its own method in python-social-auth, so we must provide a getter ourselves. 
Note that this is the *initial* value the framework will attempt to use. If it collides, the pipeline will generate a new username. We extract it here so we can force collisions in a polymorphic way. """ raise NotImplementedError # Asserts you can optionally override and make more specific. def assert_redirect_to_provider_looks_correct(self, response): """Asserts the redirect to the provider's site looks correct. When we hit /auth/login/<provider>, we should be redirected to the provider's site. Here we check that we're redirected, but we don't know enough about the provider to check what we're redirected to. Child test implementations may optionally strengthen this assertion with, for example, more details about the format of the Location header. """ self.assertEqual(302, response.status_code) self.assertTrue(response.has_header('Location')) def assert_register_response_in_pipeline_looks_correct(self, response, pipeline_kwargs): """Performs spot checks of the rendered register.html page. When we display the new account registration form after the user signs in with a third party, we prepopulate the form with values sent back from the provider. The exact set of values varies on a provider-by- provider basis and is generated by provider.BaseProvider.get_register_form_data. We provide some stock assertions based on the provider's implementation; if you want more assertions in your test, override this method. """ self.assertEqual(200, response.status_code) # Check that the correct provider was selected. self.assertIn('successfully signed in with <strong>%s</strong>' % self.provider.name, response.content) # Expect that each truthy value we've prepopulated the register form # with is actually present. for prepopulated_form_value in self.provider.get_register_form_data(pipeline_kwargs).values(): if prepopulated_form_value: self.assertIn(prepopulated_form_value, response.content) # Implementation details and actual tests past this point -- no more # configuration needed. 
def setUp(self): super(IntegrationTest, self).setUp() self.request_factory = test.RequestFactory() @property def backend_name(self): """ Shortcut for the backend name """ return self.provider.backend_name # pylint: disable=invalid-name def assert_account_settings_context_looks_correct(self, context, _user, duplicate=False, linked=None): """Asserts the user's account settings page context is in the expected state. If duplicate is True, we expect context['duplicate_provider'] to contain the duplicate provider backend name. If linked is passed, we conditionally check that the provider is included in context['auth']['providers'] and its connected state is correct. """ if duplicate: self.assertEqual(context['duplicate_provider'], self.provider.backend_name) else: self.assertIsNone(context['duplicate_provider']) if linked is not None: expected_provider = [ provider for provider in context['auth']['providers'] if provider['name'] == self.provider.name ][0] self.assertIsNotNone(expected_provider) self.assertEqual(expected_provider['connected'], linked) def assert_exception_redirect_looks_correct(self, expected_uri, auth_entry=None): """Tests middleware conditional redirection. middleware.ExceptionMiddleware makes sure the user ends up in the right place when they cancel authentication via the provider's UX. """ exception_middleware = middleware.ExceptionMiddleware() request, _ = self.get_request_and_strategy(auth_entry=auth_entry) response = exception_middleware.process_exception( request, exceptions.AuthCanceled(request.backend)) location = response.get('Location') self.assertEqual(302, response.status_code) self.assertIn('canceled', location) self.assertIn(self.backend_name, location) self.assertTrue(location.startswith(expected_uri + '?')) def assert_first_party_auth_trumps_third_party_auth(self, email=None, password=None, success=None): """Asserts first party auth was used in place of third party auth. Args: email: string. The user's email. 
If not None, will be set on POST. password: string. The user's password. If not None, will be set on POST. success: None or bool. Whether we expect auth to be successful. Set to None to indicate we expect the request to be invalid (meaning one of username or password will be missing). """ _, strategy = self.get_request_and_strategy( auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete') strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy)) self.create_user_models_for_existing_account( strategy, email, password, self.get_username(), skip_social_auth=True) strategy.request.POST = dict(strategy.request.POST) if email: strategy.request.POST['email'] = email if password: strategy.request.POST['password'] = 'bad_' + password if success is False else password self.assert_pipeline_running(strategy.request) payload = json.loads(student_views.login_user(strategy.request).content) if success is None: # Request malformed -- just one of email/password given. self.assertFalse(payload.get('success')) self.assertIn('There was an error receiving your login information', payload.get('value')) elif success: # Request well-formed and credentials good. self.assertTrue(payload.get('success')) else: # Request well-formed but credentials bad. self.assertFalse(payload.get('success')) self.assertIn('incorrect', payload.get('value')) def assert_json_failure_response_is_inactive_account(self, response): """Asserts failure on /login for inactive account looks right.""" self.assertEqual(200, response.status_code) # Yes, it's a 200 even though it's a failure. 
payload = json.loads(response.content) self.assertFalse(payload.get('success')) self.assertIn('This account has not been activated', payload.get('value')) def assert_json_failure_response_is_missing_social_auth(self, response): """Asserts failure on /login for missing social auth looks right.""" self.assertEqual(403, response.status_code) self.assertIn( "successfully logged into your %s account, but this account isn't linked" % self.provider.name, response.content ) def assert_json_failure_response_is_username_collision(self, response): """Asserts the json response indicates a username collision.""" self.assertEqual(400, response.status_code) payload = json.loads(response.content) self.assertFalse(payload.get('success')) self.assertIn('already exists', payload.get('value')) def assert_json_success_response_looks_correct(self, response): """Asserts the json response indicates success and redirection.""" self.assertEqual(200, response.status_code) payload = json.loads(response.content) self.assertTrue(payload.get('success')) self.assertEqual(pipeline.get_complete_url(self.provider.backend_name), payload.get('redirect_url')) def assert_login_response_before_pipeline_looks_correct(self, response): """Asserts a GET of /login not in the pipeline looks correct.""" self.assertEqual(200, response.status_code) # The combined login/registration page dynamically generates the login button, # but we can still check that the provider name is passed in the data attribute # for the container element. self.assertIn(self.provider.name, response.content) def assert_login_response_in_pipeline_looks_correct(self, response): """Asserts a GET of /login in the pipeline looks correct.""" self.assertEqual(200, response.status_code) def assert_password_overridden_by_pipeline(self, username, password): """Verifies that the given password is not correct. The pipeline overrides POST['password'], if any, with random data. 
""" self.assertIsNone(auth.authenticate(password=password, username=username)) def assert_pipeline_running(self, request): """Makes sure the given request is running an auth pipeline.""" self.assertTrue(pipeline.running(request)) def assert_redirect_to_dashboard_looks_correct(self, response): """Asserts a response would redirect to /dashboard.""" self.assertEqual(302, response.status_code) # pylint: disable=protected-access self.assertEqual(auth_settings._SOCIAL_AUTH_LOGIN_REDIRECT_URL, response.get('Location')) def assert_redirect_to_login_looks_correct(self, response): """Asserts a response would redirect to /login.""" self.assertEqual(302, response.status_code) self.assertEqual('/login', response.get('Location')) def assert_redirect_to_register_looks_correct(self, response): """Asserts a response would redirect to /register.""" self.assertEqual(302, response.status_code) self.assertEqual('/register', response.get('Location')) def assert_register_response_before_pipeline_looks_correct(self, response): """Asserts a GET of /register not in the pipeline looks correct.""" self.assertEqual(200, response.status_code) # The combined login/registration page dynamically generates the register button, # but we can still check that the provider name is passed in the data attribute # for the container element. 
self.assertIn(self.provider.name, response.content) def assert_social_auth_does_not_exist_for_user(self, user, strategy): """Asserts a user does not have an auth with the expected provider.""" social_auths = strategy.storage.user.get_social_auth_for_user( user, provider=self.provider.backend_name) self.assertEqual(0, len(social_auths)) def assert_social_auth_exists_for_user(self, user, strategy): """Asserts a user has a social auth with the expected provider.""" social_auths = strategy.storage.user.get_social_auth_for_user( user, provider=self.provider.backend_name) self.assertEqual(1, len(social_auths)) self.assertEqual(self.backend_name, social_auths[0].provider) def create_user_models_for_existing_account(self, strategy, email, password, username, skip_social_auth=False): """Creates user, profile, registration, and (usually) social auth. This synthesizes what happens during /register. See student.views.register and student.views._do_create_account. """ response_data = self.get_response_data() uid = strategy.request.backend.get_user_id(response_data, response_data) user = social_utils.Storage.user.create_user(email=email, password=password, username=username) profile = student_models.UserProfile(user=user) profile.save() registration = student_models.Registration() registration.register(user) registration.save() if not skip_social_auth: social_utils.Storage.user.create_social_auth(user, uid, self.provider.backend_name) return user def fake_auth_complete(self, strategy): """Fake implementation of social.backends.BaseAuth.auth_complete. Unlike what the docs say, it does not need to return a user instance. Sometimes (like when directing users to the /register form) it instead returns a response that 302s to /register. 
""" args = () kwargs = { 'request': strategy.request, 'backend': strategy.request.backend, 'user': None, 'response': self.get_response_data(), } return strategy.authenticate(*args, **kwargs) def get_registration_post_vars(self, overrides=None): """POST vars generated by the registration form.""" defaults = { 'username': 'username', 'name': 'First Last', 'gender': '', 'year_of_birth': '', 'level_of_education': '', 'goals': '', 'honor_code': 'true', 'terms_of_service': 'true', 'password': 'password', 'mailing_address': '', 'email': 'user@email.com', } if overrides: defaults.update(overrides) return defaults def get_request_and_strategy(self, auth_entry=None, redirect_uri=None): """Gets a fully-configured request and strategy. These two objects contain circular references, so we create them together. The references themselves are a mixture of normal __init__ stuff and monkey-patching done by python-social-auth. See, for example, social.apps.django_apps.utils.strategy(). """ request = self.request_factory.get( pipeline.get_complete_url(self.backend_name) + '?redirect_state=redirect_state_value&code=code_value&state=state_value') request.user = auth_models.AnonymousUser() request.session = cache.SessionStore() request.session[self.backend_name + '_state'] = 'state_value' if auth_entry: request.session[pipeline.AUTH_ENTRY_KEY] = auth_entry strategy = social_utils.load_strategy(request=request) request.social_strategy = strategy request.backend = social_utils.load_backend(strategy, self.backend_name, redirect_uri) return request, strategy def get_user_by_email(self, strategy, email): """Gets a user by email, using the given strategy.""" return strategy.storage.user.user_model().objects.get(email=email) def assert_logged_in_cookie_redirect(self, response): """Verify that the user was redirected in order to set the logged in cookie. 
""" self.assertEqual(response.status_code, 302) self.assertEqual( response["Location"], pipeline.get_complete_url(self.provider.backend_name) ) self.assertEqual(response.cookies[django_settings.EDXMKTG_LOGGED_IN_COOKIE_NAME].value, 'true') self.assertIn(django_settings.EDXMKTG_USER_INFO_COOKIE_NAME, response.cookies) def set_logged_in_cookies(self, request): """Simulate setting the marketing site cookie on the request. """ request.COOKIES[django_settings.EDXMKTG_LOGGED_IN_COOKIE_NAME] = 'true' request.COOKIES[django_settings.EDXMKTG_USER_INFO_COOKIE_NAME] = json.dumps({ 'version': django_settings.EDXMKTG_USER_INFO_COOKIE_VERSION, }) # Actual tests, executed once per child. def test_canceling_authentication_redirects_to_login_when_auth_entry_login(self): self.assert_exception_redirect_looks_correct('/login', auth_entry=pipeline.AUTH_ENTRY_LOGIN) def test_canceling_authentication_redirects_to_register_when_auth_entry_register(self): self.assert_exception_redirect_looks_correct('/register', auth_entry=pipeline.AUTH_ENTRY_REGISTER) def test_canceling_authentication_redirects_to_account_settings_when_auth_entry_account_settings(self): self.assert_exception_redirect_looks_correct( '/account/settings', auth_entry=pipeline.AUTH_ENTRY_ACCOUNT_SETTINGS ) def test_canceling_authentication_redirects_to_root_when_auth_entry_not_set(self): self.assert_exception_redirect_looks_correct('/') def test_full_pipeline_succeeds_for_linking_account(self): # First, create, the request and strategy that store pipeline state, # configure the backend, and mock out wire traffic. 
request, strategy = self.get_request_and_strategy( auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete') request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy)) pipeline.analytics.track = mock.MagicMock() request.user = self.create_user_models_for_existing_account( strategy, 'user@example.com', 'password', self.get_username(), skip_social_auth=True) # Instrument the pipeline to get to the dashboard with the full # expected state. self.client.get( pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN)) actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access mako_middleware_process_request(strategy.request) student_views.signin_user(strategy.request) student_views.login_user(strategy.request) actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access # First we expect that we're in the unlinked state, and that there # really is no association in the backend. self.assert_account_settings_context_looks_correct(account_settings_context(request), request.user, linked=False) self.assert_social_auth_does_not_exist_for_user(request.user, strategy) # We should be redirected back to the complete page, setting # the "logged in" cookie for the marketing site. self.assert_logged_in_cookie_redirect(actions.do_complete( request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access redirect_field_name=auth.REDIRECT_FIELD_NAME )) # Set the cookie and try again self.set_logged_in_cookies(request) # Fire off the auth pipeline to link. self.assert_redirect_to_dashboard_looks_correct(actions.do_complete( request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access redirect_field_name=auth.REDIRECT_FIELD_NAME)) # Now we expect to be in the linked state, with a backend entry. 
self.assert_social_auth_exists_for_user(request.user, strategy) self.assert_account_settings_context_looks_correct(account_settings_context(request), request.user, linked=True) def test_full_pipeline_succeeds_for_unlinking_account(self): # First, create, the request and strategy that store pipeline state, # configure the backend, and mock out wire traffic. request, strategy = self.get_request_and_strategy( auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete') request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy)) user = self.create_user_models_for_existing_account( strategy, 'user@example.com', 'password', self.get_username()) self.assert_social_auth_exists_for_user(user, strategy) # We're already logged in, so simulate that the cookie is set correctly self.set_logged_in_cookies(request) # Instrument the pipeline to get to the dashboard with the full # expected state. self.client.get( pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN)) actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access mako_middleware_process_request(strategy.request) student_views.signin_user(strategy.request) student_views.login_user(strategy.request) actions.do_complete(request.backend, social_views._do_login, user=user) # pylint: disable=protected-access # First we expect that we're in the linked state, with a backend entry. self.assert_account_settings_context_looks_correct(account_settings_context(request), user, linked=True) self.assert_social_auth_exists_for_user(request.user, strategy) # Fire off the disconnect pipeline to unlink. self.assert_redirect_to_dashboard_looks_correct(actions.do_disconnect( request.backend, request.user, None, redirect_field_name=auth.REDIRECT_FIELD_NAME)) # Now we expect to be in the unlinked state, with no backend entry. 
self.assert_account_settings_context_looks_correct(account_settings_context(request), user, linked=False) self.assert_social_auth_does_not_exist_for_user(user, strategy) def test_linking_already_associated_account_raises_auth_already_associated(self): # This is of a piece with # test_already_associated_exception_populates_dashboard_with_error. It # verifies the exception gets raised when we expect; the latter test # covers exception handling. email = 'user@example.com' password = 'password' username = self.get_username() _, strategy = self.get_request_and_strategy( auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete') backend = strategy.request.backend backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy)) linked_user = self.create_user_models_for_existing_account(strategy, email, password, username) unlinked_user = social_utils.Storage.user.create_user( email='other_' + email, password=password, username='other_' + username) self.assert_social_auth_exists_for_user(linked_user, strategy) self.assert_social_auth_does_not_exist_for_user(unlinked_user, strategy) with self.assertRaises(exceptions.AuthAlreadyAssociated): # pylint: disable=protected-access actions.do_complete(backend, social_views._do_login, user=unlinked_user) def test_already_associated_exception_populates_dashboard_with_error(self): # Instrument the pipeline with an exception. We test that the # exception is raised correctly separately, so it's ok that we're # raising it artificially here. This makes the linked=True artificial # in the final assert because in practice the account would be # unlinked, but getting that behavior is cumbersome here and already # covered in other tests. Using linked=True does, however, let us test # that the duplicate error has no effect on the state of the controls. 
request, strategy = self.get_request_and_strategy( auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete') strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy)) user = self.create_user_models_for_existing_account( strategy, 'user@example.com', 'password', self.get_username()) self.assert_social_auth_exists_for_user(user, strategy) self.client.get('/login') self.client.get(pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN)) actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access mako_middleware_process_request(strategy.request) student_views.signin_user(strategy.request) student_views.login_user(strategy.request) actions.do_complete(request.backend, social_views._do_login, user=user) # pylint: disable=protected-access # Monkey-patch storage for messaging; pylint: disable=protected-access request._messages = fallback.FallbackStorage(request) middleware.ExceptionMiddleware().process_exception( request, exceptions.AuthAlreadyAssociated(self.provider.backend_name, 'account is already in use.')) self.assert_account_settings_context_looks_correct( account_settings_context(request), user, duplicate=True, linked=True) def test_full_pipeline_succeeds_for_signing_in_to_existing_active_account(self): # First, create, the request and strategy that store pipeline state, # configure the backend, and mock out wire traffic. request, strategy = self.get_request_and_strategy( auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete') strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy)) pipeline.analytics.track = mock.MagicMock() user = self.create_user_models_for_existing_account( strategy, 'user@example.com', 'password', self.get_username()) self.assert_social_auth_exists_for_user(user, strategy) self.assertTrue(user.is_active) # Begin! 
Ensure that the login form contains expected controls before # the user starts the pipeline. self.assert_login_response_before_pipeline_looks_correct(self.client.get('/login')) # The pipeline starts by a user GETting /auth/login/<provider>. # Synthesize that request and check that it redirects to the correct # provider page. self.assert_redirect_to_provider_looks_correct(self.client.get( pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN))) # Next, the provider makes a request against /auth/complete/<provider> # to resume the pipeline. # pylint: disable=protected-access self.assert_redirect_to_login_looks_correct(actions.do_complete(request.backend, social_views._do_login)) mako_middleware_process_request(strategy.request) # At this point we know the pipeline has resumed correctly. Next we # fire off the view that displays the login form and posts it via JS. self.assert_login_response_in_pipeline_looks_correct(student_views.signin_user(strategy.request)) # Next, we invoke the view that handles the POST, and expect it # redirects to /auth/complete. In the browser ajax handlers will # redirect the user to the dashboard; we invoke it manually here. self.assert_json_success_response_looks_correct(student_views.login_user(strategy.request)) # We should be redirected back to the complete page, setting # the "logged in" cookie for the marketing site. 
self.assert_logged_in_cookie_redirect(actions.do_complete( request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access redirect_field_name=auth.REDIRECT_FIELD_NAME )) # Set the cookie and try again self.set_logged_in_cookies(request) self.assert_redirect_to_dashboard_looks_correct( actions.do_complete(request.backend, social_views._do_login, user=user)) self.assert_account_settings_context_looks_correct(account_settings_context(request), user) def test_signin_fails_if_account_not_active(self): _, strategy = self.get_request_and_strategy( auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete') strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy)) user = self.create_user_models_for_existing_account(strategy, 'user@example.com', 'password', self.get_username()) user.is_active = False user.save() mako_middleware_process_request(strategy.request) self.assert_json_failure_response_is_inactive_account(student_views.login_user(strategy.request)) def test_signin_fails_if_no_account_associated(self): _, strategy = self.get_request_and_strategy( auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete') strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy)) self.create_user_models_for_existing_account( strategy, 'user@example.com', 'password', self.get_username(), skip_social_auth=True) self.assert_json_failure_response_is_missing_social_auth(student_views.login_user(strategy.request)) def test_first_party_auth_trumps_third_party_auth_but_is_invalid_when_only_email_in_request(self): self.assert_first_party_auth_trumps_third_party_auth(email='user@example.com') def test_first_party_auth_trumps_third_party_auth_but_is_invalid_when_only_password_in_request(self): self.assert_first_party_auth_trumps_third_party_auth(password='password') def test_first_party_auth_trumps_third_party_auth_and_fails_when_credentials_bad(self): 
self.assert_first_party_auth_trumps_third_party_auth( email='user@example.com', password='password', success=False) def test_first_party_auth_trumps_third_party_auth_and_succeeds_when_credentials_good(self): self.assert_first_party_auth_trumps_third_party_auth( email='user@example.com', password='password', success=True) def test_full_pipeline_succeeds_registering_new_account(self): # First, create, the request and strategy that store pipeline state. # Mock out wire traffic. request, strategy = self.get_request_and_strategy( auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete') strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy)) # Begin! Grab the registration page and check the login control on it. self.assert_register_response_before_pipeline_looks_correct(self.client.get('/register')) # The pipeline starts by a user GETting /auth/login/<provider>. # Synthesize that request and check that it redirects to the correct # provider page. self.assert_redirect_to_provider_looks_correct(self.client.get( pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN))) # Next, the provider makes a request against /auth/complete/<provider>. # pylint: disable=protected-access self.assert_redirect_to_register_looks_correct(actions.do_complete(request.backend, social_views._do_login)) mako_middleware_process_request(strategy.request) # At this point we know the pipeline has resumed correctly. Next we # fire off the view that displays the registration form. self.assert_register_response_in_pipeline_looks_correct( student_views.register_user(strategy.request), pipeline.get(request)['kwargs']) # Next, we invoke the view that handles the POST. Not all providers # supply email. Manually add it as the user would have to; this # also serves as a test of overriding provider values. Always provide a # password for us to check that we override it properly. 
overridden_password = strategy.request.POST.get('password') email = 'new@example.com' if not strategy.request.POST.get('email'): strategy.request.POST = self.get_registration_post_vars({'email': email}) # The user must not exist yet... with self.assertRaises(auth_models.User.DoesNotExist): self.get_user_by_email(strategy, email) # ...but when we invoke create_account the existing edX view will make # it, but not social auths. The pipeline creates those later. self.assert_json_success_response_looks_correct(student_views.create_account(strategy.request)) # We've overridden the user's password, so authenticate() with the old # value won't work: created_user = self.get_user_by_email(strategy, email) self.assert_password_overridden_by_pipeline(overridden_password, created_user.username) # At this point the user object exists, but there is no associated # social auth. self.assert_social_auth_does_not_exist_for_user(created_user, strategy) # We should be redirected back to the complete page, setting # the "logged in" cookie for the marketing site. self.assert_logged_in_cookie_redirect(actions.do_complete( request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access redirect_field_name=auth.REDIRECT_FIELD_NAME )) # Set the cookie and try again self.set_logged_in_cookies(request) self.assert_redirect_to_dashboard_looks_correct( actions.do_complete(strategy.request.backend, social_views._do_login, user=created_user)) # Now the user has been redirected to the dashboard. Their third party account should now be linked. 
self.assert_social_auth_exists_for_user(created_user, strategy) self.assert_account_settings_context_looks_correct(account_settings_context(request), created_user, linked=True) def test_new_account_registration_assigns_distinct_username_on_collision(self): original_username = self.get_username() request, strategy = self.get_request_and_strategy( auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete') # Create a colliding username in the backend, then proceed with # assignment via pipeline to make sure a distinct username is created. strategy.storage.user.create_user(username=self.get_username(), email='user@email.com', password='password') backend = strategy.request.backend backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy)) # pylint: disable=protected-access self.assert_redirect_to_register_looks_correct(actions.do_complete(backend, social_views._do_login)) distinct_username = pipeline.get(request)['kwargs']['username'] self.assertNotEqual(original_username, distinct_username) def test_new_account_registration_fails_if_email_exists(self): request, strategy = self.get_request_and_strategy( auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete') backend = strategy.request.backend backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy)) # pylint: disable=protected-access self.assert_redirect_to_register_looks_correct(actions.do_complete(backend, social_views._do_login)) mako_middleware_process_request(strategy.request) self.assert_register_response_in_pipeline_looks_correct( student_views.register_user(strategy.request), pipeline.get(request)['kwargs']) strategy.request.POST = self.get_registration_post_vars() # Create twice: once successfully, and once causing a collision. 
student_views.create_account(strategy.request) self.assert_json_failure_response_is_username_collision(student_views.create_account(strategy.request)) def test_pipeline_raises_auth_entry_error_if_auth_entry_invalid(self): auth_entry = 'invalid' self.assertNotIn(auth_entry, pipeline._AUTH_ENTRY_CHOICES) # pylint: disable=protected-access _, strategy = self.get_request_and_strategy(auth_entry=auth_entry, redirect_uri='social:complete') with self.assertRaises(pipeline.AuthEntryError): strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy)) def test_pipeline_raises_auth_entry_error_if_auth_entry_missing(self): _, strategy = self.get_request_and_strategy(auth_entry=None, redirect_uri='social:complete') with self.assertRaises(pipeline.AuthEntryError): strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy)) class Oauth2IntegrationTest(IntegrationTest): # pylint: disable=abstract-method """Base test case for integration tests of Oauth2 providers.""" # Dict of string -> object. Information about the token granted to the # user. Override with test values in subclass; None to force a throw. TOKEN_RESPONSE_DATA = None # Dict of string -> object. Information about the user themself. Override # with test values in subclass; None to force a throw. USER_RESPONSE_DATA = None def get_response_data(self): """Gets dict (string -> object) of merged data about the user.""" response_data = dict(self.TOKEN_RESPONSE_DATA) response_data.update(self.USER_RESPONSE_DATA) return response_data
agpl-3.0
igemsoftware/SYSU-Software2013
project/Python27_32/Lib/test/test_bufio.py
125
2755
import unittest from test import test_support as support import io # C implementation. import _pyio as pyio # Python implementation. # Simple test to ensure that optimizations in the IO library deliver the # expected results. For best testing, run this under a debug-build Python too # (to exercise asserts in the C code). lengths = list(range(1, 257)) + [512, 1000, 1024, 2048, 4096, 8192, 10000, 16384, 32768, 65536, 1000000] class BufferSizeTest(unittest.TestCase): def try_one(self, s): # Write s + "\n" + s to file, then open it and ensure that successive # .readline()s deliver what we wrote. # Ensure we can open TESTFN for writing. support.unlink(support.TESTFN) # Since C doesn't guarantee we can write/read arbitrary bytes in text # files, use binary mode. f = self.open(support.TESTFN, "wb") try: # write once with \n and once without f.write(s) f.write(b"\n") f.write(s) f.close() f = open(support.TESTFN, "rb") line = f.readline() self.assertEqual(line, s + b"\n") line = f.readline() self.assertEqual(line, s) line = f.readline() self.assertTrue(not line) # Must be at EOF f.close() finally: support.unlink(support.TESTFN) def drive_one(self, pattern): for length in lengths: # Repeat string 'pattern' as often as needed to reach total length # 'length'. Then call try_one with that string, a string one larger # than that, and a string one smaller than that. Try this with all # small sizes and various powers of 2, so we exercise all likely # stdio buffer sizes, and "off by one" errors on both sides. q, r = divmod(length, len(pattern)) teststring = pattern * q + pattern[:r] self.assertEqual(len(teststring), length) self.try_one(teststring) self.try_one(teststring + b"x") self.try_one(teststring[:-1]) def test_primepat(self): # A pattern with prime length, to avoid simple relationships with # stdio buffer sizes. 
self.drive_one(b"1234567890\00\01\02\03\04\05\06") def test_nullpat(self): self.drive_one(bytes(1000)) class CBufferSizeTest(BufferSizeTest): open = io.open class PyBufferSizeTest(BufferSizeTest): open = staticmethod(pyio.open) class BuiltinBufferSizeTest(BufferSizeTest): open = open def test_main(): support.run_unittest(CBufferSizeTest, PyBufferSizeTest, BuiltinBufferSizeTest) if __name__ == "__main__": test_main()
mit
noironetworks/group-based-policy
gbpservice/nfp/core/cfg.py
1
1671
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg as oslo_config CONF = oslo_config.CONF NFP_OPTS = [ oslo_config.IntOpt( 'workers', default=1, help='Number of event worker process to be created.' ), oslo_config.ListOpt( 'nfp_modules_path', default='gbpservice.nfp.core.test', help='Path for NFP modules.' 'All modules from this path are autoloaded by framework' ), oslo_config.StrOpt( 'backend', default='rpc', help='Backend Support for communicationg with configurator.' ) ] EXTRA_OPTS = [ oslo_config.StrOpt( 'logger_class', default='gbpservice.nfp.core.log.WrappedLogger', help='logger class path to handle logging seperately.' ), ] def init(module, args, **kwargs): """Initialize the configuration. """ oslo_config.CONF.register_opts(EXTRA_OPTS) oslo_config.CONF.register_opts(NFP_OPTS, module) oslo_config.CONF(args=args, project='nfp', version='%%(prog)s %s' % ('version'), **kwargs) return oslo_config.CONF
apache-2.0
GodBlessPP/w16b_test
static/Brython3.1.3-20150514-095342/Lib/genericpath.py
727
3093
""" Path operations common to more than one OS Do not use directly. The OS specific modules import the appropriate functions from this module themselves. """ import os import stat __all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime', 'getsize', 'isdir', 'isfile'] # Does a path exist? # This is false for dangling symbolic links on systems that support them. def exists(path): """Test whether a path exists. Returns False for broken symbolic links""" try: os.stat(path) except os.error: return False return True # This follows symbolic links, so both islink() and isdir() can be true # for the same path on systems that support symlinks def isfile(path): """Test whether a path is a regular file""" try: st = os.stat(path) except os.error: return False return stat.S_ISREG(st.st_mode) # Is a path a directory? # This follows symbolic links, so both islink() and isdir() # can be true for the same path on systems that support symlinks def isdir(s): """Return true if the pathname refers to an existing directory.""" try: st = os.stat(s) except os.error: return False return stat.S_ISDIR(st.st_mode) def getsize(filename): """Return the size of a file, reported by os.stat().""" return os.stat(filename).st_size def getmtime(filename): """Return the last modification time of a file, reported by os.stat().""" return os.stat(filename).st_mtime def getatime(filename): """Return the last access time of a file, reported by os.stat().""" return os.stat(filename).st_atime def getctime(filename): """Return the metadata change time of a file, reported by os.stat().""" return os.stat(filename).st_ctime # Return the longest prefix of all list elements. def commonprefix(m): "Given a list of pathnames, returns the longest common leading component" if not m: return '' s1 = min(m) s2 = max(m) for i, c in enumerate(s1): if c != s2[i]: return s1[:i] return s1 # Split a path in root and extension. 
# The extension is everything starting at the last dot in the last # pathname component; the root is everything before that. # It is always true that root + ext == p. # Generic implementation of splitext, to be parametrized with # the separators def _splitext(p, sep, altsep, extsep): """Split the extension from a pathname. Extension is everything from the last dot to the end, ignoring leading dots. Returns "(root, ext)"; ext may be empty.""" # NOTE: This code must work for text and bytes strings. sepIndex = p.rfind(sep) if altsep: altsepIndex = p.rfind(altsep) sepIndex = max(sepIndex, altsepIndex) dotIndex = p.rfind(extsep) if dotIndex > sepIndex: # skip all leading dots filenameIndex = sepIndex + 1 while filenameIndex < dotIndex: if p[filenameIndex:filenameIndex+1] != extsep: return p[:dotIndex], p[dotIndex:] filenameIndex += 1 return p, p[:0]
agpl-3.0
rzambre/servo
tests/wpt/web-platform-tests/cors/resources/preflight.py
253
1238
def main(request, response): headers = [("Content-Type", "text/plain")] if "check" in request.GET: token = request.GET.first("token") value = request.server.stash.take(token) if value == None: body = "0" else: if request.GET.first("check", None) == "keep": request.server.stash.put(token, value) body = "1" return headers, body if request.method == "OPTIONS": if not "Access-Control-Request-Method" in request.headers: response.set_error(400, "No Access-Control-Request-Method header") return "ERROR: No access-control-request-method in preflight!" headers.append(("Access-Control-Allow-Methods", request.headers['Access-Control-Request-Method'])) if "max_age" in request.GET: headers.append(("Access-Control-Max-Age", request.GET['max_age'])) if "token" in request.GET: request.server.stash.put(request.GET.first("token"), 1) headers.append(("Access-Control-Allow-Origin", "*")) headers.append(("Access-Control-Allow-Headers", "x-print")) body = request.headers.get("x-print", "NO") return headers, body
mpl-2.0
pipicold/BlockyTime
server_new/testing/blocks_controller_tester.py
1
2255
import datetime from Initialization import Initialization from Data_Controllers.blocks_controller import blocks_controller from DB_Model import database_helper from DB_Model.database_tester import database_tester from DB_Model.database_model import Blocks from DB_Model.database_model import Users class blocks_controller_tester(object): ''' tester for day_controller ''' def __init__(self): print("\n\n\n\n\n\n\n\n\n\n\n\n!!!users_controller_tester!!!") self.database = Initialization().get_global_db() self.db_helper = database_helper.database_helper(self.database) self.db_helper.rebuild_database() self.db_tester = database_tester() self.db_tester.generate_fake_data() new_pri = self.db_tester.generate_fake_primary_category() new_sec = self.db_tester.generate_fake_secondary_category(new_pri) new_sec = self.db_tester.generate_fake_secondary_category(new_pri) new_sec = self.db_tester.generate_fake_secondary_category(new_pri) self.db_helper.dump_all_data() def testing_get_blocks_list_of_a_day(self): ''' :) ''' blocks_con_tester = blocks_controller('0') assert len(blocks_con_tester.get_blocks_list_of_a_day( '2017-03-10')) == 48 print "testing_get_blocks_list_of_a_day SUCCESS <<<<<<<<<<<<<<<<<<<<<<" def testing_update_a_block(self): ''' :) ''' blocks_con_tester = blocks_controller('0') blocks_con_tester.update_a_block('1', '1') ret_obj = Blocks.query.filter_by(id=1).first() assert ret_obj.secondary_category_id == 1 assert ret_obj.primary_category_id == 1 blocks_con_tester.update_a_block(2, 2) ret_obj = Blocks.query.filter_by(id=2).first() assert ret_obj.secondary_category_id == 2 assert ret_obj.primary_category_id == 1 #there is no secondary_category_id = 6, not update,no crashing blocks_con_tester.update_a_block(3, 6) ret_obj = Blocks.query.filter_by(id=3).first() assert ret_obj.secondary_category_id == 0 assert ret_obj.primary_category_id == 0 print "testing_update_a_block SUCCESS <<<<<<<<<<<<<<<<<<<<<<"
gpl-3.0
alx-eu/django
tests/modeltests/select_related/models.py
114
1906
""" 41. Tests for select_related() ``select_related()`` follows all relationships and pre-caches any foreign key values so that complex trees can be fetched in a single query. However, this isn't always a good idea, so the ``depth`` argument control how many "levels" the select-related behavior will traverse. """ from django.db import models from django.utils.encoding import python_2_unicode_compatible # Who remembers high school biology? @python_2_unicode_compatible class Domain(models.Model): name = models.CharField(max_length=50) def __str__(self): return self.name @python_2_unicode_compatible class Kingdom(models.Model): name = models.CharField(max_length=50) domain = models.ForeignKey(Domain) def __str__(self): return self.name @python_2_unicode_compatible class Phylum(models.Model): name = models.CharField(max_length=50) kingdom = models.ForeignKey(Kingdom) def __str__(self): return self.name @python_2_unicode_compatible class Klass(models.Model): name = models.CharField(max_length=50) phylum = models.ForeignKey(Phylum) def __str__(self): return self.name @python_2_unicode_compatible class Order(models.Model): name = models.CharField(max_length=50) klass = models.ForeignKey(Klass) def __str__(self): return self.name @python_2_unicode_compatible class Family(models.Model): name = models.CharField(max_length=50) order = models.ForeignKey(Order) def __str__(self): return self.name @python_2_unicode_compatible class Genus(models.Model): name = models.CharField(max_length=50) family = models.ForeignKey(Family) def __str__(self): return self.name @python_2_unicode_compatible class Species(models.Model): name = models.CharField(max_length=50) genus = models.ForeignKey(Genus) def __str__(self): return self.name
bsd-3-clause
jiegec/gnuradio
grc/base/Param.py
9
7863
""" Copyright 2008-2011 Free Software Foundation, Inc. This file is part of GNU Radio GNU Radio Companion is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. GNU Radio Companion is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA """ from . import odict from Element import Element def _get_keys(lst): return [elem.get_key() for elem in lst] def _get_elem(lst, key): try: return lst[_get_keys(lst).index(key)] except ValueError: raise ValueError, 'Key "%s" not found in %s.'%(key, _get_keys(lst)) class Option(Element): def __init__(self, param, n): Element.__init__(self, param) self._name = n.find('name') self._key = n.find('key') self._opts = dict() opts = n.findall('opt') #test against opts when non enum if not self.get_parent().is_enum() and opts: raise Exception, 'Options for non-enum types cannot have sub-options' #extract opts for opt in opts: #separate the key:value try: key, value = opt.split(':') except: raise Exception, 'Error separating "%s" into key:value'%opt #test against repeated keys if self._opts.has_key(key): raise Exception, 'Key "%s" already exists in option'%key #store the option self._opts[key] = value def __str__(self): return 'Option %s(%s)'%(self.get_name(), self.get_key()) def get_name(self): return self._name def get_key(self): return self._key ############################################## # Access Opts ############################################## def get_opt_keys(self): return self._opts.keys() def 
get_opt(self, key): return self._opts[key] def get_opts(self): return self._opts.values() class Param(Element): def __init__(self, block, n): """ Make a new param from nested data. Args: block: the parent element n: the nested odict """ # if the base key is a valid param key, copy its data and overlay this params data base_key = n.find('base_key') if base_key and base_key in block.get_param_keys(): n_expanded = block.get_param(base_key)._n.copy() n_expanded.update(n) n = n_expanded # save odict in case this param will be base for another self._n = n # parse the data self._name = n.find('name') self._key = n.find('key') value = n.find('value') or '' self._type = n.find('type') or 'raw' self._hide = n.find('hide') or '' self._tab_label = n.find('tab') or block.get_param_tab_labels()[0] if not self._tab_label in block.get_param_tab_labels(): block.get_param_tab_labels().append(self._tab_label) #build the param Element.__init__(self, block) #create the Option objects from the n data self._options = list() for option in map(lambda o: Option(param=self, n=o), n.findall('option')): key = option.get_key() #test against repeated keys if key in self.get_option_keys(): raise Exception, 'Key "%s" already exists in options'%key #store the option self.get_options().append(option) #test the enum options if self.is_enum(): #test against options with identical keys if len(set(self.get_option_keys())) != len(self.get_options()): raise Exception, 'Options keys "%s" are not unique.'%self.get_option_keys() #test against inconsistent keys in options opt_keys = self.get_options()[0].get_opt_keys() for option in self.get_options(): if set(opt_keys) != set(option.get_opt_keys()): raise Exception, 'Opt keys "%s" are not identical across all options.'%opt_keys #if a value is specified, it must be in the options keys self._value = value if value or value in self.get_option_keys() else self.get_option_keys()[0] if self.get_value() not in self.get_option_keys(): raise Exception, 'The value "%s" 
is not in the possible values of "%s".'%(self.get_value(), self.get_option_keys()) else: self._value = value or '' def validate(self): """ Validate the param. The value must be evaluated and type must a possible type. """ Element.validate(self) if self.get_type() not in self.get_types(): self.add_error_message('Type "%s" is not a possible type.'%self.get_type()) def get_evaluated(self): raise NotImplementedError def to_code(self): """ Convert the value to code. @throw NotImplementedError """ raise NotImplementedError def get_types(self): """ Get a list of all possible param types. @throw NotImplementedError """ raise NotImplementedError def get_color(self): return '#FFFFFF' def __str__(self): return 'Param - %s(%s)'%(self.get_name(), self.get_key()) def is_param(self): return True def get_name(self): return self.get_parent().resolve_dependencies(self._name).strip() def get_key(self): return self._key def get_hide(self): return self.get_parent().resolve_dependencies(self._hide).strip() def get_value(self): value = self._value if self.is_enum() and value not in self.get_option_keys(): value = self.get_option_keys()[0] self.set_value(value) return value def set_value(self, value): self._value = str(value) #must be a string def get_type(self): return self.get_parent().resolve_dependencies(self._type) def get_tab_label(self): return self._tab_label def is_enum(self): return self._type == 'enum' def __repr__(self): """ Get the repr (nice string format) for this param. Just return the value (special case enum). Derived classes can handle complex formatting. 
Returns: the string representation """ if self.is_enum(): return self.get_option(self.get_value()).get_name() return self.get_value() ############################################## # Access Options ############################################## def get_option_keys(self): return _get_keys(self.get_options()) def get_option(self, key): return _get_elem(self.get_options(), key) def get_options(self): return self._options ############################################## # Access Opts ############################################## def get_opt_keys(self): return self.get_option(self.get_value()).get_opt_keys() def get_opt(self, key): return self.get_option(self.get_value()).get_opt(key) def get_opts(self): return self.get_option(self.get_value()).get_opts() ############################################## ## Import/Export Methods ############################################## def export_data(self): """ Export this param's key/value. Returns: a nested data odict """ n = odict() n['key'] = self.get_key() n['value'] = self.get_value() return n
gpl-3.0