# Copyright (c) 2013-2016 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import mock
from oslo_config import cfg
import six
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
nexus_network_driver)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import exceptions
from networking_cisco.plugins.ml2.drivers.cisco.nexus import nexus_db_v2
from networking_cisco.tests.unit.ml2.drivers.cisco.nexus import (
test_cisco_nexus_base)
from neutron.plugins.common import constants as p_const
# Error-message substring injected into the mocked ncclient connect()
# failure and asserted on in the connection-failure tests below.
CONNECT_ERROR = 'Unable to connect to Nexus'
class TestCiscoNexusDeviceConfig(object):

    """Unit tests Config for Cisco ML2 Nexus device driver."""

    # Short-lived aliases keep the config table readable; both are
    # deleted from the class namespace once the table is built, so the
    # final class attributes are exactly `test_configs`.
    _base = test_cisco_nexus_base
    _cfg = test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj

    test_configs = {
        'test_config1': _cfg(
            _base.NEXUS_IP_ADDRESS_1, _base.HOST_NAME_1,
            _base.NEXUS_PORT_1, _base.INSTANCE_1,
            _base.VLAN_ID_1, _base.NO_VXLAN_ID, None,
            _base.DEVICE_OWNER_COMPUTE, {}, None,
            _base.NORMAL_VNIC),
        'test_config2': _cfg(
            _base.NEXUS_IP_ADDRESS_1, _base.HOST_NAME_2,
            _base.NEXUS_PORT_2, _base.INSTANCE_2,
            _base.VLAN_ID_2, _base.NO_VXLAN_ID, None,
            _base.DEVICE_OWNER_COMPUTE, {}, None,
            _base.NORMAL_VNIC),
        'test_config3': _cfg(
            _base.NEXUS_IP_ADDRESS_1, _base.HOST_NAME_1,
            _base.NEXUS_PORT_1, _base.INSTANCE_2,
            _base.VLAN_ID_1, _base.NO_VXLAN_ID, None,
            _base.DEVICE_OWNER_COMPUTE, {}, None,
            _base.NORMAL_VNIC),
        'test_config4': _cfg(
            _base.NEXUS_IP_ADDRESS_8, _base.HOST_NAME_4,
            _base.NEXUS_PORT_1, _base.INSTANCE_1,
            _base.VLAN_ID_1, _base.NO_VXLAN_ID, None,
            _base.DEVICE_OWNER_COMPUTE, {}, None,
            _base.NORMAL_VNIC),
        'test_config5': _cfg(
            _base.NEXUS_IP_ADDRESS_8, _base.HOST_NAME_5,
            _base.NEXUS_PORT_2, _base.INSTANCE_2,
            _base.VLAN_ID_1, _base.NO_VXLAN_ID, None,
            _base.DEVICE_OWNER_COMPUTE, {}, None,
            _base.NORMAL_VNIC),
        'test_config_portchannel': _cfg(
            _base.NEXUS_IP_ADDRESS_2, _base.HOST_NAME_PC,
            _base.NEXUS_PORTCHANNELS, _base.INSTANCE_PC,
            _base.VLAN_ID_PC, _base.NO_VXLAN_ID, None,
            _base.DEVICE_OWNER_COMPUTE, {}, None,
            _base.NORMAL_VNIC),
        'test_config_dual': _cfg(
            _base.NEXUS_IP_ADDRESS_DUAL, _base.HOST_NAME_DUAL,
            _base.NEXUS_DUAL, _base.INSTANCE_DUAL,
            _base.VLAN_ID_DUAL, _base.NO_VXLAN_ID, None,
            _base.DEVICE_OWNER_COMPUTE, {}, None,
            _base.NORMAL_VNIC),
        'test_config_dhcp': _cfg(
            _base.NEXUS_IP_ADDRESS_1, _base.HOST_NAME_1,
            _base.NEXUS_PORT_1, _base.INSTANCE_1,
            _base.VLAN_ID_1, _base.NO_VXLAN_ID, None,
            _base.DEVICE_OWNER_DHCP, {}, None,
            _base.NORMAL_VNIC),
        'test_config_router_ha_intf': _cfg(
            _base.NEXUS_IP_ADDRESS_1, _base.HOST_NAME_1,
            _base.NEXUS_PORT_1, _base.INSTANCE_1,
            _base.VLAN_ID_1, _base.NO_VXLAN_ID, None,
            _base.DEVICE_OWNER_ROUTER_HA_INTF, {}, None,
            _base.NORMAL_VNIC),
        'test_config_router_intf': _cfg(
            _base.NEXUS_IP_ADDRESS_1, _base.HOST_NAME_1,
            _base.NEXUS_PORT_1, _base.INSTANCE_1,
            _base.VLAN_ID_1, _base.NO_VXLAN_ID, None,
            _base.DEVICE_OWNER_ROUTER_INTF, {}, None,
            _base.NORMAL_VNIC),
        'test_config_router_gw': _cfg(
            _base.NEXUS_IP_ADDRESS_1, _base.HOST_NAME_1,
            _base.NEXUS_PORT_1, _base.INSTANCE_1,
            _base.VLAN_ID_1, _base.NO_VXLAN_ID, None,
            _base.DEVICE_OWNER_ROUTER_GW, {}, None,
            _base.NORMAL_VNIC),
        'test_config_portchannel2': _cfg(
            _base.NEXUS_IP_ADDRESS_6, _base.HOST_NAME_3,
            _base.NEXUS_PORTCHANNELS, _base.INSTANCE_PC,
            _base.VLAN_ID_PC, _base.NO_VXLAN_ID, None,
            _base.DEVICE_OWNER_COMPUTE, {}, None,
            _base.NORMAL_VNIC),
        'test_config_portchannel3': _cfg(
            _base.NEXUS_IP_ADDRESS_7, _base.HOST_NAME_3,
            _base.NEXUS_PORTCHANNELS, _base.INSTANCE_PC,
            _base.VLAN_ID_PC, _base.NO_VXLAN_ID, None,
            _base.DEVICE_OWNER_COMPUTE, {}, None,
            _base.NORMAL_VNIC),
        'test_config_migrate': _cfg(
            _base.NEXUS_IP_ADDRESS_3, _base.HOST_NAME_6,
            _base.NEXUS_PORT_2, _base.INSTANCE_1,
            _base.VLAN_ID_1, _base.NO_VXLAN_ID, None,
            _base.DEVICE_OWNER_COMPUTE, {}, None,
            _base.NORMAL_VNIC),
    }

    # Deterministic iteration order for the tests that walk the table.
    test_configs = collections.OrderedDict(sorted(test_configs.items()))

    del _base, _cfg
class TestCiscoNexusDeviceResults(
    test_cisco_nexus_base.TestCiscoNexusBaseResults):

    """Unit tests driver results for Cisco ML2 Nexus."""

    # Short-lived aliases for the expected-config templates; removed
    # from the class namespace after the table is built.
    _vlan_add = test_cisco_nexus_base.RESULT_ADD_VLAN
    _vlan_del = test_cisco_nexus_base.RESULT_DEL_VLAN
    _intf_add = test_cisco_nexus_base.RESULT_ADD_INTERFACE
    _intf_del = test_cisco_nexus_base.RESULT_DEL_INTERFACE

    test_results = {

        'duplicate_add_port_driver_result': (
            [_vlan_add.format(267),
             _intf_add.format('ethernet', '1\/10', 267)]),

        'duplicate_del_port_driver_result': (
            [_intf_del.format('ethernet', '1\/10', 267),
             _vlan_del.format(267)]),

        'add_port2_driver_result': (
            [_vlan_add.format(265),
             _intf_add.format('ethernet', '1\/20', 265)]),

        'delete_port2_driver_result': (
            [_intf_del.format('ethernet', '1\/20', 265),
             _vlan_del.format(265)]),

        'add_port2_driver_result2': (
            [_vlan_add.format(267),
             _intf_add.format('ethernet', '1\/20', 267)]),

        'delete_port2_driver_result2': (
            [_intf_del.format('ethernet', '1\/20', 267)]),

        'add_port2_driver_result3': (
            [_vlan_add.format(268),
             _intf_add.format('portchannel', '2', 268),
             _vlan_add.format(268),
             _intf_add.format('portchannel', '2', 268)]),

        'delete_port2_driver_result3': (
            [_intf_del.format('portchannel', '2', 268),
             _vlan_del.format(268),
             _intf_del.format('portchannel', '2', 268),
             _vlan_del.format(268)]),

        'add_port_channel_driver_result': (
            [_vlan_add.format(268),
             _intf_add.format('portchannel', '2', 268)]),

        'delete_port_channel_driver_result': (
            [_intf_del.format('portchannel', '2', 268),
             _vlan_del.format(268)]),

        'dual_add_port_driver_result': (
            [_vlan_add.format(269),
             _intf_add.format('ethernet', '1\/3', 269),
             _vlan_add.format(269),
             _intf_add.format('portchannel', '2', 269)]),

        'dual_delete_port_driver_result': (
            [_intf_del.format('ethernet', '1\/3', 269),
             _vlan_del.format(269),
             _intf_del.format('portchannel', '2', 269)]),

        'migrate_add_host2_driver_result': (
            [_vlan_add.format(267),
             _intf_add.format('ethernet', '1\/20', 267)]),

        'add_port_driver_result': (
            [_vlan_add.format(267),
             _intf_add.format('ethernet', '1\/10', 267)]),

        'del_port_driver_result': (
            [_intf_del.format('ethernet', '1\/10', 267),
             _vlan_del.format(267)]),
    }

    del _vlan_add, _vlan_del, _intf_add, _intf_del
class TestCiscoNexusDevice(test_cisco_nexus_base.TestCiscoNexusBase,
                           TestCiscoNexusDeviceConfig,
                           TestCiscoNexusDeviceResults):

    """Unit tests for Cisco ML2 Nexus device driver."""

    def setUp(self):
        """Sets up mock ncclient, and switch and credentials dictionaries."""
        cfg.CONF.set_override('never_cache_ssh_connection', False, 'ml2_cisco')
        super(TestCiscoNexusDevice, self).setUp()
        # Drop any ncclient activity recorded during base-class setup.
        self.mock_ncclient.reset_mock()
        self.results = TestCiscoNexusDeviceResults()

    def test_create_delete_duplicate_ports(self):
        """Tests creation and deletion of two new virtual Ports."""
        self._basic_create_verify_port_vlan(
            'test_config1',
            self.results.get_test_results(
                'duplicate_add_port_driver_result'))

        self._create_port(self.test_configs['test_config3'])
        # TODO(caboucha)
        # Commented out until the correct fix for
        # the following issue is resolved.
        # https://review.openstack.org/#/c/241216/
        #
        # verify first config was indeed configured
        # Original code was as follows:
        # self._verify_results(duplicate_add_port_driver_result)

        # Verify there are 2 port configs
        port_bindings = nexus_db_v2.get_nexusvlan_binding(
            test_cisco_nexus_base.VLAN_ID_1,
            test_cisco_nexus_base.NEXUS_IP_ADDRESS_1)
        self.assertEqual(2, len(port_bindings))

        # Clean all the ncclient mock_calls so we can evaluate
        # results of delete operations.
        self.mock_ncclient.reset_mock()

        # For results, pass empty list to verify no nexus action on
        # first port removal.
        self._basic_delete_verify_port_vlan(
            'test_config1', [], nbr_of_bindings=1)
        self._basic_delete_verify_port_vlan(
            'test_config3',
            self.results.get_test_results(
                'duplicate_del_port_driver_result'))

    def test_create_delete_duplicate_port_transaction(self):
        """Tests creation and deletion same port transaction."""
        self._basic_create_verify_port_vlan(
            'test_config1',
            self.results.get_test_results(
                'duplicate_add_port_driver_result'))
        self.assertEqual(
            1, len(nexus_db_v2.get_nexusport_switch_bindings(
                test_cisco_nexus_base.NEXUS_IP_ADDRESS_1)))

        # Replaying the identical port event must not add a binding
        # nor emit new switch configuration.
        self._create_port(self.test_configs['test_config1'])
        self._verify_results(
            self.results.get_test_results(
                'duplicate_add_port_driver_result'))
        self.assertEqual(
            1, len(nexus_db_v2.get_nexusport_switch_bindings(
                test_cisco_nexus_base.NEXUS_IP_ADDRESS_1)))

        # Clean all the ncclient mock_calls so we can evaluate
        # results of delete operations.
        self.mock_ncclient.reset_mock()

        self._basic_delete_verify_port_vlan(
            'test_config1',
            self.results.get_test_results(
                'duplicate_del_port_driver_result'),
            nbr_of_bindings=0)
        self._basic_delete_verify_port_vlan(
            'test_config1',
            self.results.get_test_results(
                'duplicate_del_port_driver_result'))

    def test_create_delete_same_switch_diff_hosts_diff_vlan(self):
        """Test create/delete two Ports, same switch/diff host & vlan."""
        self._basic_create_verify_port_vlan(
            'test_config1',
            self.results.get_test_results(
                'duplicate_add_port_driver_result'))

        self._create_port(self.test_configs['test_config2'])
        self._verify_results(
            self.results.get_test_results('add_port2_driver_result'))

        # Verify there are 2 port configs
        port_bindings = nexus_db_v2.get_nexusport_switch_bindings(
            test_cisco_nexus_base.NEXUS_IP_ADDRESS_1)
        self.assertEqual(2, len(port_bindings))

        # Clean all the ncclient mock_calls so we can evaluate
        # results of delete operations.
        self.mock_ncclient.reset_mock()

        self._basic_delete_verify_port_vlan(
            'test_config2',
            self.results.get_test_results('delete_port2_driver_result'),
            nbr_of_bindings=1)
        self._basic_delete_verify_port_vlan(
            'test_config1',
            self.results.get_test_results(
                'duplicate_del_port_driver_result'))

    def test_create_delete_same_switch_diff_hosts_same_vlan(self):
        """Test create/delete two Ports, same switch & vlan/diff host."""
        self._basic_create_verify_port_vlan(
            'test_config4',
            self.results.get_test_results('add_port_driver_result'))

        self._create_port(self.test_configs['test_config5'])
        self._verify_results(
            self.results.get_test_results('add_port2_driver_result2'))

        # Verify there are 2 port configs
        port_bindings = nexus_db_v2.get_nexusport_switch_bindings(
            test_cisco_nexus_base.NEXUS_IP_ADDRESS_8)
        self.assertEqual(2, len(port_bindings))

        # Clean all the ncclient mock_calls so we can evaluate
        # results of delete operations.
        self.mock_ncclient.reset_mock()

        self._basic_delete_verify_port_vlan(
            'test_config5',
            self.results.get_test_results('delete_port2_driver_result2'),
            nbr_of_bindings=1)
        self._basic_delete_verify_port_vlan(
            'test_config4',
            self.results.get_test_results('del_port_driver_result'))

    def test_create_delete_diff_switch_same_host(self):
        """Test create/delete of two Ports, diff switch/same host."""
        self._basic_create_verify_port_vlan(
            'test_config_portchannel2',
            self.results.get_test_results('add_port2_driver_result3'))

        # Verify there are 2 port configs. One per switch.
        for switch_ip in (test_cisco_nexus_base.NEXUS_IP_ADDRESS_6,
                          test_cisco_nexus_base.NEXUS_IP_ADDRESS_7):
            port_bindings = nexus_db_v2.get_nexusport_switch_bindings(
                switch_ip)
            self.assertEqual(1, len(port_bindings))

        # Clean all the ncclient mock_calls so we can evaluate
        # results of delete operations.
        self.mock_ncclient.reset_mock()

        # For results, pass empty list to verify no nexus action on
        # first port removal.
        self._basic_delete_verify_port_vlan(
            'test_config_portchannel2',
            self.results.get_test_results('delete_port2_driver_result3'))

    def test_create_delete_portchannel(self):
        """Tests creation of a port over a portchannel."""
        self._create_delete_port(
            'test_config_portchannel',
            self.results.get_test_results('add_port_channel_driver_result'),
            self.results.get_test_results(
                'delete_port_channel_driver_result'))

    def test_create_delete_dual(self):
        """Tests creation and deletion of dual ports for single server"""
        self._basic_create_verify_port_vlan(
            'test_config_dual',
            self.results.get_test_results('dual_add_port_driver_result'),
            nbr_of_bindings=2)
        self._basic_delete_verify_port_vlan(
            'test_config_dual',
            self.results.get_test_results('dual_delete_port_driver_result'))

    def test_create_delete_dhcp(self):
        """Tests creation and deletion of ports with device_owner of dhcp."""
        self._create_delete_port(
            'test_config_dhcp',
            self.results.get_test_results(
                'duplicate_add_port_driver_result'),
            self.results.get_test_results(
                'duplicate_del_port_driver_result'))

    def test_create_delete_router_ha_intf(self):
        """Tests creation and deletion of ports with device_owner
        of router_ha_interface.
        """
        self._create_delete_port(
            'test_config_router_ha_intf',
            self.results.get_test_results(
                'duplicate_add_port_driver_result'),
            self.results.get_test_results(
                'duplicate_del_port_driver_result'))

    def test_create_delete_router_intf(self):
        """Tests creation and deletion of ports with device_owner
        of router_interface.
        """
        self._create_delete_port(
            'test_config_router_intf',
            self.results.get_test_results(
                'duplicate_add_port_driver_result'),
            self.results.get_test_results(
                'duplicate_del_port_driver_result'))

    def test_create_delete_router_gateway(self):
        """Tests creation and deletion of ports with device_owner
        of router_gateway.
        """
        self._create_delete_port(
            'test_config_router_gw',
            self.results.get_test_results(
                'duplicate_add_port_driver_result'),
            self.results.get_test_results(
                'duplicate_del_port_driver_result'))

    def test_nexus_vm_migration(self):
        """Verify VM (live) migration.

        Simulate the following:
        Nova informs neutron of live-migration with port-update(new host).
        This should trigger two update_port_pre/postcommit() calls.

        The first one should only change the current host_id and remove the
        binding resulting in the mechanism drivers receiving:
          PortContext.original['binding:host_id']: previous value
          PortContext.original_top_bound_segment: previous value
          PortContext.current['binding:host_id']: current (new) value
          PortContext.top_bound_segment: None

        The second one binds the new host resulting in the mechanism
        drivers receiving:
          PortContext.original['binding:host_id']: previous value
          PortContext.original_top_bound_segment: None
          PortContext.current['binding:host_id']: previous value
          PortContext.top_bound_segment: new value
        """
        self._basic_create_verify_port_vlan(
            'test_config1',
            self.results.get_test_results(
                'duplicate_add_port_driver_result'))
        binding = nexus_db_v2.get_nexusvm_bindings(
            test_cisco_nexus_base.VLAN_ID_1,
            test_cisco_nexus_base.INSTANCE_1)[0]
        self.assertEqual(
            test_cisco_nexus_base.NEXUS_PORT_1,
            binding.port_id)

        # Build a context representing the migrated (unbound) port and
        # graft the original host's details on as the "original" port.
        port_context = self._generate_port_context(
            self.test_configs['test_config_migrate'],
            unbind_port=True)
        orig_cfg = self.test_configs['test_config1']
        port_context.set_orig_port(
            orig_cfg.instance_id,
            orig_cfg.host_name,
            orig_cfg.device_owner,
            orig_cfg.profile,
            orig_cfg.vnic_type,
            test_cisco_nexus_base.NETID)

        self._cisco_mech_driver.create_port_postcommit(port_context)
        self._cisco_mech_driver.update_port_precommit(port_context)
        self._cisco_mech_driver.update_port_postcommit(port_context)

        # Verify that port entry has been deleted.
        self.assertRaises(
            exceptions.NexusPortBindingNotFound,
            nexus_db_v2.get_nexusvm_bindings,
            test_cisco_nexus_base.VLAN_ID_1,
            test_cisco_nexus_base.INSTANCE_1)

        # Clean all the ncclient mock_calls to clear exception
        # and other mock_call history.
        self.mock_ncclient.reset_mock()

        self._basic_create_verify_port_vlan(
            'test_config_migrate',
            self.results.get_test_results(
                'migrate_add_host2_driver_result'))

        # Verify that port entry has been added using new host name.
        # Use port_id to verify that 2nd host name was used.
        binding = nexus_db_v2.get_nexusvm_bindings(
            test_cisco_nexus_base.VLAN_ID_1,
            test_cisco_nexus_base.INSTANCE_1)[0]
        self.assertEqual(
            test_cisco_nexus_base.NEXUS_PORT_2,
            binding.port_id)
class TestCiscoNexusDeviceFailure(test_cisco_nexus_base.TestCiscoNexusBase,
                                  TestCiscoNexusDeviceConfig,
                                  TestCiscoNexusDeviceResults):

    """Negative Unit tests for Cisco ML2 Nexus device driver."""

    def setUp(self):
        """Sets up mock ncclient, and switch and credentials dictionaries."""
        cfg.CONF.set_override('never_cache_ssh_connection', False, 'ml2_cisco')
        super(TestCiscoNexusDeviceFailure, self).setUp()
        # Drop any ncclient activity recorded during base-class setup.
        self.mock_ncclient.reset_mock()
        self.results = TestCiscoNexusDeviceResults()

    def test_connect_failure(self):
        """Verifies exception handling during ncclient connect. """
        # Clean all the ncclient mock_calls to clear exception
        # and other mock_call history.
        self.mock_ncclient.reset_mock()

        self.mock_ncclient.configure_mock(
            **{'connect.side_effect': Exception(CONNECT_ERROR)})
        e = self.assertRaises(exceptions.NexusConnectFailed,
                              self._create_port,
                              self.test_configs['test_config1'])
        self.assertIn(CONNECT_ERROR, six.u(str(e)))
        self.assertEqual(1, self.mock_ncclient.connect.call_count)

    def test_ncclient_fail_on_connect_other_exceptions(self):
        """Test that other errors during connect() sequences are still handled.

        If the old ncclient is installed, we expect to get a TypeError first,
        but should still handle other errors in the usual way, whether they
        appear on the first or second call to connect().
        """
        # Clear connect_call_count
        self.mock_ncclient.reset_mock()

        err_strings = ['This is TypeError',
                       'This is IOError',
                       'This is AttributeError']
        except_errors = [TypeError(err_strings[0]),
                         IOError(err_strings[1]),
                         AttributeError(err_strings[2])]

        for trial, error in enumerate(except_errors):
            self.mock_ncclient.configure_mock(
                **{'connect.side_effect': error})

            port_context = self._generate_port_context(
                self.test_configs['test_config1'])

            # Create path masks the underlying error with a generic one.
            e = self.assertRaises(
                exceptions.NexusConnectFailed,
                self._cisco_mech_driver.create_port_postcommit,
                port_context)
            self.assertIn(
                "Create Failed: Port event can not "
                "be processed at this time.", six.u(str(e)))

            # Update and delete paths surface the original error text.
            self._cisco_mech_driver.update_port_precommit(port_context)
            e = self.assertRaises(
                exceptions.NexusConnectFailed,
                self._cisco_mech_driver.update_port_postcommit,
                port_context)
            self.assertIn(err_strings[trial], six.u(str(e)))

            self._cisco_mech_driver.delete_port_precommit(port_context)
            e = self.assertRaises(
                exceptions.NexusConnectFailed,
                self._cisco_mech_driver.delete_port_postcommit,
                port_context)
            self.assertIn(err_strings[trial], six.u(str(e)))

        # Three connect attempts (create/update/delete) per error.
        self.assertEqual(
            len(except_errors) * 3,
            self.mock_ncclient.connect.call_count)

    def test_get_nexus_type_failure(self):
        """Verifies exception during ncclient get inventory. """
        self._create_port_failure(
            'connect.return_value.get.side_effect',
            'show inventory',
            'test_config1',
            'Create Failed:',
            which_exc=exceptions.NexusConnectFailed)

        # Verify we attempt to connect once. get_nexus_type is a
        # special case since replay code will retry
        self.assertEqual(1, self.mock_ncclient.connect.call_count)

    def test_create_vlan_failure(self):
        """Verifies exception during edit vlan create driver. """
        self._create_port_failure(
            'connect.return_value.edit_config.side_effect',
            'vlan vlan-id-create-delete 267',
            'test_config1',
            __name__)

        # Verify we attempt to connect twice. First when first
        # create_vlan fails then _edit_config loops to attempt
        # it again and it fails again.
        self.assertEqual(2, self.mock_ncclient.connect.call_count)

    def test_delete_vlan_failure(self):
        """Verifies exception during edit vlan delete driver. """
        self._delete_port_failure(
            'connect.return_value.edit_config.side_effect',
            'vlan-id-create-delete no vlan 267',
            'test_config1',
            __name__)

    def test_create_trunk_failure(self):
        """Verifies exception during create trunk interface driver. """
        self._create_port_failure(
            'connect.return_value.edit_config.side_effect',
            'switchport trunk allowed vlan_id 267',
            'test_config1',
            __name__)

    def test_delete_trunk_failure(self):
        """Verifies exception during delete trunk interface driver. """
        self._delete_port_failure(
            'connect.return_value.edit_config.side_effect',
            'switchport trunk allowed remove vlan 267',
            'test_config1',
            __name__)

    def test_edit_fail_on_try_1(self):
        """Verifies reconnect during ncclient edit. """
        # Clean all the ncclient mock_calls so we can evaluate
        # results of delete operations.
        self.mock_ncclient.reset_mock()

        # Fail the first create-vlan edit only; the retry succeeds.
        side_effect = self._config_side_effects_on_count(
            'vlan vlan-id-create-delete 267',
            Exception(__name__), range(1))
        self.mock_ncclient.configure_mock(
            **{'connect.return_value.edit_config.side_effect': side_effect})
        self._create_port(self.test_configs['test_config1'])

        # Verify we connected twice. Connect attempt 1 occurs on
        # any first driver call. Then create-vlan fails first
        # time resulting close of stale handle. Driver
        # loops around to try and reopen and create-vlan should
        # then be successful on the 2nd pass.
        self.assertEqual(2, self.mock_ncclient.connect.call_count)
        self.assertEqual(1,
                         self.mock_ncclient.connect.return_value.
                         close_session.call_count)

    def test_nexus_host_not_configured(self):
        """Test handling of a host not found in our configuration.

        If a host is not found in the cisco configuration the driver
        should silently ignore (unknown host name is logged) and no database
        or switch configuration is performed. Exercise against all APIs.
        """
        test_func_list = [
            self._cisco_mech_driver.create_port_postcommit,
            self._cisco_mech_driver.update_port_precommit,
            self._cisco_mech_driver.update_port_postcommit,
            self._cisco_mech_driver.delete_port_precommit,
            self._cisco_mech_driver.delete_port_postcommit,
        ]

        self.mock_ncclient.reset_mock()
        port_context = self._generate_port_context(
            self.test_configs['test_config1'],
            override_host_name='no_host')

        for test_func in test_func_list:
            test_func(port_context)

        # Neither the database nor the switch may have been touched.
        self.assertRaises(
            exceptions.NexusPortBindingNotFound,
            nexus_db_v2.get_nexusport_switch_bindings,
            test_cisco_nexus_base.NEXUS_IP_ADDRESS_1)
        assert not self.mock_ncclient.connect.called

    def test_nexus_invalid_segment(self):
        """Test handling of a non VLAN segment.

        Pass a FLAT segment type into the driver. Verify that no
        exceptions are raised (non-VLAN segments are logged only) and
        that no database or switch configuration is performed.
        """
        test_func_list = [
            self._cisco_mech_driver.create_port_postcommit,
            self._cisco_mech_driver.update_port_precommit,
            self._cisco_mech_driver.update_port_postcommit,
        ]

        network_context = test_cisco_nexus_base.FakeNetworkContext(
            0, p_const.TYPE_FLAT)
        port_config = self.test_configs['test_config1']
        port_context = test_cisco_nexus_base.FakePortContext(
            port_config.instance_id,
            port_config.host_name,
            port_config.device_owner,
            network_context, None,
            port_config.profile,
            port_config.vnic_type
        )

        # Clear out call_count changes during initialization activity
        self.mock_ncclient.reset_mock()
        for test_func in test_func_list:
            test_func(port_context)

        # Neither the database nor the switch may have been touched.
        self.assertRaises(
            exceptions.NexusPortBindingNotFound,
            nexus_db_v2.get_nexusport_switch_bindings,
            test_cisco_nexus_base.NEXUS_IP_ADDRESS_1)
        assert not self.mock_ncclient.connect.called

    def test_nexus_missing_fields(self):
        """Test handling of a NexusMissingRequiredFields exception.

        Test the Cisco NexusMissingRequiredFields exception by using
        empty device_id value during port creation.
        """
        base = test_cisco_nexus_base
        # Same as 'test_config1' except the device/instance id is empty.
        bad_config = base.TestCiscoNexusBase.TestConfigObj(
            base.NEXUS_IP_ADDRESS_1,
            base.HOST_NAME_1,
            base.NEXUS_PORT_1,
            '',
            base.VLAN_ID_1,
            base.NO_VXLAN_ID,
            None,
            base.DEVICE_OWNER_COMPUTE,
            {},
            None,
            base.NORMAL_VNIC)

        # Clear out call_count changes during initialization activity
        self.mock_ncclient.reset_mock()
        self.assertRaises(
            exceptions.NexusMissingRequiredFields,
            self._create_port,
            bad_config)

    def test_nexus_segment_none(self):
        """Test handling of segment is None.

        Verify that None segments do not throw an exception in
        _port_action_xxx. None segments passed to the event handlers are
        logged and are not processed.
        """
        network_context = test_cisco_nexus_base.FakeNetworkContext(
            0, p_const.TYPE_VLAN)
        network_context._network_segments = None

        port_config = self.test_configs['test_config1']
        port_context = test_cisco_nexus_base.FakePortContext(
            port_config.instance_id,
            port_config.host_name,
            port_config.device_owner,
            network_context, None,
            port_config.profile,
            port_config.vnic_type
        )

        test_func_list = [
            self._cisco_mech_driver.update_port_precommit,
            self._cisco_mech_driver.update_port_postcommit,
        ]

        # Clear out call_count changes during initialization activity
        self.mock_ncclient.reset_mock()
        for test_func in test_func_list:
            test_func(port_context)

        # Neither the database nor the switch may have been touched.
        self.assertRaises(
            exceptions.NexusPortBindingNotFound,
            nexus_db_v2.get_nexusport_switch_bindings,
            test_cisco_nexus_base.NEXUS_IP_ADDRESS_1)
        assert not self.mock_ncclient.connect.called

    def test_nexus_ncclient_disconnect(self):
        """Test handling of closing ncclient sessions.

        When multi neutron-server processes are used verify that ncclient
        close_session method is called.
        """
        # Mock to keep track of number of close_session calls.
        ncclient_close = mock.patch.object(
            nexus_network_driver.CiscoNexusSshDriver,
            '_close_session').start()

        # Clear out call_count changes during initialization activity
        self.mock_ncclient.reset_mock()

        # Verify that ncclient is not closed by default.
        self._basic_create_verify_port_vlan(
            'test_config1',
            self.results.get_test_results(
                'duplicate_add_port_driver_result'))
        assert not ncclient_close.called
        self._basic_delete_verify_port_vlan(
            'test_config1',
            self.results.get_test_results(
                'duplicate_del_port_driver_result'))

        # Patch to close ncclient session.
        mock.patch.object(nexus_network_driver.CiscoNexusSshDriver,
                          '_get_close_ssh_session',
                          return_value=True).start()

        # Verify that ncclient close is called twice. Once for
        # get_nexus_type during create_port_postcommit(). Then
        # It is suppressed for successful create VLAN but called
        # after trunk interface calls.
        self._basic_create_verify_port_vlan(
            'test_config1',
            self.results.get_test_results(
                'duplicate_add_port_driver_result'))
        self.assertEqual(2, ncclient_close.call_count)

    def test_nexus_extended_vlan_range_failure(self):
        """Test that extended VLAN range config errors are ignored.

        Some versions of Nexus switch do not allow state changes for
        the extended VLAN range (1006-4094), but these errors can be
        ignored (default values are appropriate). Test that such errors
        are ignored by the Nexus plugin.
        """
        self._create_port_valid_exception(
            'connect.return_value.edit_config.side_effect',
            'vlan vlan-id-create-delete 267',
            'test_config1',
            "Can't modify state for extended")

        # No reconnect attempted...call_count will be one
        self.assertEqual(1, self.mock_ncclient.connect.call_count)

        self._create_port_valid_exception(
            'connect.return_value.edit_config.side_effect',
            'vlan vlan-id-create-delete 265',
            'test_config2',
            "Command is only allowed on VLAN")

        # No reconnect attempted...call_count will be 0 since reset_mock
        # is called in _create_port_valid_exception and caching enabled
        self.assertEqual(0, self.mock_ncclient.connect.call_count)
class TestCiscoNexusInitResults(
    test_cisco_nexus_base.TestCiscoNexusBaseResults):

    """Unit tests driver results for Cisco ML2 Nexus."""

    # Short-lived alias; removed from the class namespace below.
    _intf = test_cisco_nexus_base.RESULT_INTERFACE

    test_results = {
        # set 1 - switch 1.1.1.1 sets eth 1/10 & 1/20 to None
        # set 2 - switch 8.8.8.8 sets eth 1/10 & 1/20 to None
        # set 3 - switch 4.4.4.4 sets eth 1/3 & portchannel 2 to None
        # set 4 - switch 3.3.3.3 sets eth 1/20 to None
        # set 5 - switch 2.2.2.2 sets portchannel 2 to None
        # set 6 - switch 6.6.6.6 sets portchannel 2 to None
        # set 7 - switch 7.7.7.7 sets portchannel 2 to None
        'duplicate_init_port_driver_result1': (
            [_intf.format('ethernet', '1\/10', 'None'),
             _intf.format('ethernet', '1\/10', 'None'),
             _intf.format('ethernet', '1\/3', 'None'),
             _intf.format('ethernet', '1\/20', 'None'),
             _intf.format('portchannel', '2', 'None'),
             _intf.format('portchannel', '2', 'None'),
             _intf.format('portchannel', '2', 'None')]),

        # Only one entry to match for last 3 so make None
        # so count matches in _verify_results
        'duplicate_init_port_driver_result2': (
            [_intf.format('ethernet', '1\/20', 'None'),
             _intf.format('ethernet', '1\/20', 'None'),
             _intf.format('portchannel', '2', 'None'),
             None,
             None,
             None,
             None])
    }

    del _intf
class TestCiscoNexusDeviceInit(test_cisco_nexus_base.TestCiscoNexusBase,
                               TestCiscoNexusDeviceConfig):

    """Verifies interface vlan allowed none is set when missing."""

    def mock_init(self):
        # Make the mocked get() return an empty body so the driver does
        # not see 'switchport trunk allowed vlan none' in the interface
        # query and is forced to push it to the Nexus device itself.
        self.mock_ncclient.configure_mock(
            **{'connect.return_value.get.return_value.data_xml': ''})

    def setUp(self):
        """Sets up mock ncclient, and switch and credentials dictionaries."""

        cfg.CONF.set_override(
            'never_cache_ssh_connection', False, 'ml2_cisco')
        super(TestCiscoNexusDeviceInit, self).setUp()
        self.results = TestCiscoNexusInitResults()

    def test_verify_initialization(self):
        # Both result sets must appear in the driver's recorded calls.
        for result_set in ('duplicate_init_port_driver_result1',
                           'duplicate_init_port_driver_result2'):
            self._verify_results(
                self.results.get_test_results(result_set))
class TestCiscoNexusBaremetalResults(
    test_cisco_nexus_base.TestCiscoNexusBaseResults):

    """Unit tests driver results for Cisco ML2 Nexus.

    Raw strings are used for the interface names because ``\/`` is a
    regex-escaped slash; in a plain string literal the unknown ``\/``
    escape raises a DeprecationWarning (SyntaxWarning on newer
    interpreters).
    """

    test_results = {
        'add_port_ethernet_driver_result': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(267),
             test_cisco_nexus_base.RESULT_ADD_INTERFACE.
             format('ethernet', r'1\/10', 267)]),

        'delete_port_ethernet_driver_result': (
            [test_cisco_nexus_base.RESULT_DEL_INTERFACE.
             format('ethernet', r'1\/10', 267),
             test_cisco_nexus_base.RESULT_DEL_VLAN.format(267)]),

        'add_vm_port_ethernet_driver_result': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(265),
             test_cisco_nexus_base.RESULT_ADD_INTERFACE.
             format('ethernet', r'1\/10', 265)]),

        'delete_vm_port_ethernet_driver_result': (
            [test_cisco_nexus_base.RESULT_DEL_INTERFACE.
             format('ethernet', r'1\/10', 265),
             test_cisco_nexus_base.RESULT_DEL_VLAN.format(265)]),

        'add_port_channel_driver_result': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(267),
             test_cisco_nexus_base.RESULT_ADD_INTERFACE.
             format('port-channel', '469', 267)]),

        'delete_port_channel_driver_result': (
            [test_cisco_nexus_base.RESULT_DEL_INTERFACE.
             format('port-channel', '469', 267),
             test_cisco_nexus_base.RESULT_DEL_VLAN.format(267)]),

        'add_port_ethernet_native_driver_result': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(265),
             (test_cisco_nexus_base.RESULT_ADD_NATIVE_INTERFACE.
              format('ethernet', r'1\/10', 265) +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_ADD_INTERFACE.
              format('ethernet', r'1\/10', 265))]),

        'delete_port_ethernet_native_driver_result': (
            [(test_cisco_nexus_base.RESULT_DEL_NATIVE_INTERFACE.
              format('ethernet', r'1\/10') +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_DEL_INTERFACE.
              format('ethernet', r'1\/10', 265)),
             test_cisco_nexus_base.RESULT_DEL_VLAN.format(265)])

    }
class TestCiscoNexusBaremetalDevice(test_cisco_nexus_base.TestCiscoNexusBase):

    """Unit tests for Cisco ML2 Nexus baremetal device driver."""

    baremetal_profile = {
        "local_link_information": [
            {
                "port_id": test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_1,
                "switch_info": {
                    "is_native": False,
                    "switch_ip": test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
                },
            },
        ]
    }

    baremetal_profile_is_native = {
        "local_link_information": [
            {
                "port_id": test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_1,
                "switch_info": {
                    "is_native": True,
                    "switch_ip": test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
                },
            },
        ]
    }

    # The IP Address and Nexus Port information is duplicated in case
    # of baremetal. The core code uses content of baremetal_profile
    # While test code continues to use values in test_config
    # for verification. This keeps test code simpler.
    test_configs = {
        'test_config1':
            test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
                test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
                test_cisco_nexus_base.HOST_NAME_UNUSED,
                test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_1,
                test_cisco_nexus_base.INSTANCE_1,
                test_cisco_nexus_base.VLAN_ID_1,
                test_cisco_nexus_base.NO_VXLAN_ID,
                None,
                test_cisco_nexus_base.DEVICE_OWNER_BAREMETAL,
                baremetal_profile,
                test_cisco_nexus_base.HOST_NAME_Baremetal + '1',
                test_cisco_nexus_base.BAREMETAL_VNIC),
        'test_config_vm':
            test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
                test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
                test_cisco_nexus_base.HOST_NAME_Baremetal + '1',
                test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_1,
                test_cisco_nexus_base.INSTANCE_2,
                test_cisco_nexus_base.VLAN_ID_2,
                test_cisco_nexus_base.NO_VXLAN_ID,
                None,
                test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
                {},
                None,
                test_cisco_nexus_base.NORMAL_VNIC),
        'test_config_native':
            test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
                test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
                test_cisco_nexus_base.HOST_NAME_UNUSED,
                test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_1,
                test_cisco_nexus_base.INSTANCE_1,
                test_cisco_nexus_base.VLAN_ID_2,
                test_cisco_nexus_base.NO_VXLAN_ID,
                None,
                test_cisco_nexus_base.DEVICE_OWNER_BAREMETAL,
                baremetal_profile_is_native,
                None,
                test_cisco_nexus_base.BAREMETAL_VNIC),
    }

    def setUp(self):
        """Sets up mock ncclient, and switch and credentials dictionaries."""

        cfg.CONF.set_override('never_cache_ssh_connection', False, 'ml2_cisco')
        super(TestCiscoNexusBaremetalDevice, self).setUp()
        self.results = TestCiscoNexusBaremetalResults()

    def _init_port_channel(self):
        # this is to prevent interface initialization from occurring
        # which adds unnecessary noise to the results.
        data_xml = {'connect.return_value.get.return_value.data_xml':
                    'switchport trunk allowed vlan none\n'
                    'channel-group 469 mode active'}
        self.mock_ncclient.configure_mock(**data_xml)

    def test_create_delete_basic_bm_ethernet_port_and_vm(self):
        """Basic creation and deletion test of 1 ethernet port."""

        self._basic_create_verify_port_vlan(
            'test_config1',
            self.results.get_test_results(
                'add_port_ethernet_driver_result'))

        # Clean all driver mock_calls so we can evaluate next
        # set of results.
        self.mock_ncclient.reset_mock()

        self._basic_create_verify_port_vlan(
            'test_config_vm',
            self.results.get_test_results(
                'add_vm_port_ethernet_driver_result'),
            nbr_of_bindings=2)

        self._basic_delete_verify_port_vlan(
            'test_config_vm',
            self.results.get_test_results(
                'delete_vm_port_ethernet_driver_result'),
            nbr_of_bindings=1)

        self._basic_delete_verify_port_vlan(
            'test_config1',
            self.results.get_test_results(
                'delete_port_ethernet_driver_result'))

    def test_create_delete_basic_port_channel(self):
        """Basic creation and deletion test of 1 port-channel."""

        self._init_port_channel()

        self._basic_create_verify_port_vlan(
            'test_config1',
            self.results.get_test_results(
                'add_port_channel_driver_result'))

        # Clean all the ncclient mock_calls so we can evaluate
        # results of delete operations.
        self.mock_ncclient.reset_mock()

        self._basic_delete_verify_port_vlan(
            'test_config1',
            self.results.get_test_results(
                'delete_port_channel_driver_result'))

    def test_create_delete_basic_eth_port_is_native(self):
        """Basic creation and deletion test of 1 ethernet port."""

        self._basic_create_verify_port_vlan(
            'test_config_native',
            self.results.get_test_results(
                'add_port_ethernet_native_driver_result'))

        # Clean all the ncclient mock_calls so we can evaluate
        # results of delete operations.
        self.mock_ncclient.reset_mock()

        self._basic_delete_verify_port_vlan(
            'test_config_native',
            self.results.get_test_results(
                'delete_port_ethernet_native_driver_result'))

    def test_create_delete_switch_ip_not_defined(self):
        """Create/delete of 1 ethernet port switchinfo is string."""

        baremetal_profile_no_switch_ip = {
            "local_link_information": [
                # This IP is configured at init time
                {
                    "port_id": test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_1,
                    "switch_info": {
                        "is_native": False,
                        "switch_ip": "1.1.1.1",
                    },
                },
                # This IP not configured at init time
                {
                    "port_id": test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_1,
                    "switch_info": {
                        "is_native": False,
                        "switch_ip": "6.6.6.6",
                    },
                },
            ]
        }

        local_test_configs = {
            'test_config1':
                test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
                    test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
                    test_cisco_nexus_base.HOST_NAME_UNUSED,
                    test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_1,
                    test_cisco_nexus_base.INSTANCE_1,
                    test_cisco_nexus_base.VLAN_ID_1,
                    test_cisco_nexus_base.NO_VXLAN_ID,
                    None,
                    test_cisco_nexus_base.DEVICE_OWNER_BAREMETAL,
                    baremetal_profile_no_switch_ip,
                    test_cisco_nexus_base.HOST_NAME_Baremetal + '3',
                    test_cisco_nexus_base.BAREMETAL_VNIC),
        }

        self._basic_create_verify_port_vlan(
            '',
            self.results.get_test_results(
                'add_port_ethernet_driver_result'), 1,
            other_test=local_test_configs['test_config1'])

        # Clean all the ncclient mock_calls so we can evaluate
        # results of delete operations.
        self.mock_ncclient.reset_mock()

        self._basic_delete_verify_port_vlan(
            '',
            self.results.get_test_results(
                'delete_port_ethernet_driver_result'),
            nbr_of_bindings=0,
            other_test=local_test_configs['test_config1'])

    def test_new_host_mapping_db(self):
        """Exercises add/get/update/remove of the host mapping db API."""

        # NOTE: loop variables are named 'mapping' (not 'map') so the
        # builtin map() is not shadowed.
        nexus_db_v2.add_host_mapping(
            "host-1", "1.1.1.1", "ethernet:1/1", 0, False)
        nexus_db_v2.add_host_mapping(
            "host-1", "2.2.2.2", "ethernet:1/1", 0, False)
        nexus_db_v2.add_host_mapping(
            "host-2", "1.1.1.1", "ethernet:2/2", 0, False)
        nexus_db_v2.add_host_mapping(
            "host-3", "3.3.3.3", "ethernet:3/3", 0, True)
        nexus_db_v2.add_host_mapping(
            "host-4", "4.4.4.4", "ethernet:4/4", 0, True)

        # Do a get 1.1.1.1 and verify only host-1 is returned
        mappings = nexus_db_v2.get_switch_if_host_mappings(
            "1.1.1.1", "ethernet:1/1")
        self.assertEqual(
            len(mappings),
            1,
            "Unexpected number of switch interface mappings")
        for mapping in mappings:
            self.assertEqual(
                mapping.host_id,
                "host-1",
                "Expecting host-1 returned from "
                "get_switch_if_host_mappings")

        # Do a get on host-1 and verify 2 entries returned
        mappings = nexus_db_v2.get_host_mappings("host-1")
        self.assertEqual(
            len(mappings),
            2,
            "Unexpected number of host mappings")
        for mapping in mappings:
            self.assertEqual(
                mapping.host_id,
                "host-1",
                "Expecting host-1 returned from "
                "get_host_mappings")
            self.assertEqual(
                mapping.if_id,
                "ethernet:1/1",
                "Expecting interface returned from "
                "get_host_mappings")

        # Do a get on switch 1.1.1.1 and verify 2 entries returned
        mappings = nexus_db_v2.get_switch_host_mappings("1.1.1.1")
        self.assertEqual(
            len(mappings),
            2,
            "Unexpected number of switch mappings")
        for mapping in mappings:
            self.assertEqual(
                mapping.switch_ip,
                "1.1.1.1",
                "Expecting switch_ip returned from "
                "get_switch_host_mappings")

        # Update host mapping by changing the ch_grp
        nexus_db_v2.update_host_mapping(
            "host-2",
            "ethernet:2/2",
            "1.1.1.1",
            2)
        mappings = nexus_db_v2.get_host_mappings("host-2")
        self.assertEqual(
            len(mappings),
            1,
            "Unexpected number of host mappings aft update")
        for mapping in mappings:
            self.assertEqual(
                mapping.host_id,
                "host-2",
                "Expecting host-2 returned from "
                "get_host_mappings")
            self.assertEqual(
                mapping.ch_grp,
                2,
                "Expecting ch_grp 2 returned from "
                "get_host_mappings for host 2")

        # remove 1 host mapping
        nexus_db_v2.remove_host_mapping(
            "ethernet:2/2", "1.1.1.1")

        # Verify it is gone
        self.assertRaises(
            exceptions.NexusHostMappingNotFound,
            nexus_db_v2.get_host_mappings,
            "host-2")

        # remove all static host mapping
        nexus_db_v2.remove_all_static_host_mappings()

        # Verify it is gone
        mappings = nexus_db_v2.get_all_host_mappings()
        self.assertEqual(
            len(mappings),
            2,
            "Unexpected number of non-static entries")
        for mapping in mappings:
            self.assertFalse(
                mapping.is_static,
                "Expecting remaining hosts from"
                "get_all_host_mappings to be dynamic")

        # remove host mappings
        nexus_db_v2.remove_host_mapping(
            "ethernet:1/1", "2.2.2.2")
        nexus_db_v2.remove_host_mapping(
            "ethernet:1/1", "1.1.1.1")

        # Verify it is gone
        self.assertRaises(
            exceptions.NexusHostMappingNotFound,
            nexus_db_v2.get_host_mappings,
            "host-1")
class TestCiscoNexusNonCacheSshDevice(
    test_cisco_nexus_base.TestCiscoNexusBase):

    """Unit tests for Cisco ML2 Nexus device driver in non-cache ssh mode."""

    # Testing new default of True for config var 'never_cache_ssh_connection'

    test_configs = {
        'test_config1':
            test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
                test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
                test_cisco_nexus_base.HOST_NAME_1,
                test_cisco_nexus_base.NEXUS_PORT_1,
                test_cisco_nexus_base.INSTANCE_1,
                test_cisco_nexus_base.VLAN_ID_1,
                test_cisco_nexus_base.NO_VXLAN_ID,
                None,
                test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
                {},
                None,
                test_cisco_nexus_base.NORMAL_VNIC),
    }

    # Raw strings are used for the interface names below because '\/' is a
    # regex-escaped slash and is an invalid escape in plain string literals.
    simple_add_port_ethernet_driver_result = (
        [test_cisco_nexus_base.RESULT_ADD_VLAN.format(267),
         test_cisco_nexus_base.RESULT_ADD_INTERFACE.
         format('ethernet', r'1\/10', 267)])

    simple_delete_port_ethernet_driver_result = (
        [test_cisco_nexus_base.RESULT_DEL_INTERFACE.
         format('ethernet', r'1\/10', 267),
         test_cisco_nexus_base.RESULT_DEL_VLAN.format(267)])

    def test_create_delete_basic(self):
        """Basic creation and deletion test of 1 ethernet port."""

        # Clean all the ncclient mock_calls so we can evaluate
        # results of add operation.
        self.mock_ncclient.reset_mock()

        # Call _create_port directly without verification
        # We know at this point that this works.
        self._create_port(self.test_configs['test_config1'])

        # The objective is to verify call count when caching disabled
        self.assertEqual(2, self.mock_ncclient.connect.call_count)
        self.assertEqual(2,
                         self.mock_ncclient.connect.return_value.
                         close_session.call_count)

        # Clean all the ncclient mock_calls so we can evaluate
        # results of delete operations.
        self.mock_ncclient.reset_mock()

        # nbr_of_bindings includes reserved port binding
        self._delete_port(self.test_configs['test_config1'])

        self.assertEqual(2, self.mock_ncclient.connect.call_count)
        self.assertEqual(2,
                         self.mock_ncclient.connect.return_value.
                         close_session.call_count)

    def test_edit_fail_on_try_1(self):
        """Verifies reconnect during ncclient edit. """

        # Clean all the ncclient mock_calls so we can evaluate
        # results of delete operations.
        self.mock_ncclient.reset_mock()

        config = {'connect.return_value.edit_config.side_effect':
                  self._config_side_effects_on_count(
                      'vlan vlan-id-create-delete 267',
                      Exception(__name__), range(1))}
        self.mock_ncclient.configure_mock(**config)

        self._create_port(self.test_configs['test_config1'])

        # With ssh handle not patched, there will be 3 connects
        # and 3 closes.
        # 1) Full connect during create_port get nexus type call
        #    and close after this call.
        # 2) Full connect during update_port on first failed
        #    create_vlan, then close on error.  Driver then
        #    loops back and performs a full reconnect on
        #    successful send of create_vlan.
        #    The close operation is skipped following this.
        # 3) When interface configuration is sent, a close
        #    is then perform to complete this transaction set.
        self.assertEqual(3, self.mock_ncclient.connect.call_count)
        self.assertEqual(3,
                         self.mock_ncclient.connect.return_value.
                         close_session.call_count)
|
|
# import demistomock as demisto
from CommonServerPython import *
from datetime import datetime
import urllib3
import dateparser
import traceback
from typing import Any, Dict, List, Optional, Tuple, cast, Iterable
urllib3.disable_warnings()
''' CONSTANTS '''

DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'  # UTC timestamp format string
MAX_EVENTS_TO_FETCH = 50  # upper bound on events pulled per fetch cycle
COGNNI_SEVERITIES = ['Low', 'Medium', 'High', 'Critical']  # NOTE(review): not referenced in visible code — confirm use
SUNDAY_ISO_WEEKDAY = 7  # datetime.isoweekday() value for Sunday
''' CLIENT CLASS '''
class Client(BaseClient):
    """Client class to interact with the service API

    This Client implements API calls, and does not contain any Demisto logic.
    Should only do requests and return data.
    It inherits from BaseClient defined in CommonServer Python.
    Most calls use _http_request() that handles proxy, SSL verification, etc.
    """

    def fetch_key(self, api_key: str) -> Dict[str, Any]:
        """Exchange the configured API key for a login response via GET."""
        return self._http_request(
            method='GET',
            url_suffix=f"/api/v1/login/key/{api_key}"
        )

    def graphql(self, query: str, variables: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """POST a GraphQL operation and return the response's 'data' member.

        :param query: GraphQL query document.
        :param variables: optional variables dict; defaults to empty.
        :raises KeyError: if the response has no 'data' key (e.g. the
            server returned only an 'errors' payload).
        """
        if not variables:
            variables = {}

        graphql_operation = {
            "query": query,
            "variables": variables
        }

        res = self._http_request(
            method='POST',
            url_suffix='/intelligence/data/graphql',
            json_data=graphql_operation
        )
        return res['data']

    def ping(self) -> Dict[str, Any]:
        """Minimal connectivity probe using the service's 'ping' query."""
        query = "{ping}"
        return self.graphql(
            query=query
        )

    def fetch_events(self, min_severity: int, start_time: str, events_limit: int, offset: int) -> List[Dict[str, Any]]:
        """Query events at or above min_severity for the week of start_time.

        NOTE(review): start_time is spliced directly into the query text
        rather than passed as a GraphQL variable — assumes callers supply
        a trusted, well-formed date string (see fetch_incidents).
        """
        query = """
            query($severityValue:String!, $pagination:Pagination) {
                events(
                    filter: {
                        coordinates: [
                            {
                            x: {
                                type: None,
                                value: "none"
                            },
                            y: {
                                type: Severity,
                                value: $severityValue
                            },
                            z: {
                                type:Week,
                                values:[\"""" + start_time + """\"]
                            }
                            }
                        ]
                        pagination: $pagination
                    }
                ) {
                    eventId: id
                    description
                    severity
                    sourceApplication
                    date
                    items {
                        itemId: id
                        externalId
                        type
                        name
                        clusterUID
                        data
                        createdAt
                        labels {
                            name
                        }
                    }
                    insights {
                        name
                    }
                }
            }
        """
        variables = {
            "pagination": {
                "limit": events_limit,
                "offset": offset,
                "direction": "Ascend"
            },
            # API expects the severity value as a string, not an int.
            "severityValue": str(min_severity),
        }
        res = self.graphql(
            query=query,
            variables=variables
        )
        return res['events']

    def get_event(self, event_id: str) -> Dict[str, Any]:
        """Fetch a single event (id, description, sourceApplication, date)."""
        query = """
            query ($event_id: ID!) {
                event(id: $event_id){
                    id
                    description
                    sourceApplication
                    date
                }
            }
        """
        variables = {
            "event_id": event_id
        }
        res = self.graphql(
            query=query,
            variables=variables
        )
        return res['event']

    def fetch_insights(self, min_severity: int) -> List[Dict[str, Any]]:
        """List insights with severity at or above min_severity."""
        query = """
            query ($min_severity: Int) {
                insights(minSeverity: $min_severity){
                    id
                    description
                    name
                    severity
                }
            }
        """
        variables = {
            "min_severity": int(min_severity)
        }
        res = self.graphql(
            query=query,
            variables=variables
        )
        return res['insights']

    def get_insight(self, insight_id: str) -> Dict[str, Any]:
        """Fetch a single insight (id, name, description, severity)."""
        query = """
            query ($insight_id: ID!) {
                insight(id: $insight_id) {
                    id
                    name
                    description
                    severity
                }
            }
        """
        variables = {
            "insight_id": insight_id
        }
        res = self.graphql(
            query=query,
            variables=variables
        )
        return res['insight']
''' HELPER FUNCTIONS '''
def convert_to_demisto_severity(severity: str) -> int:
    """Maps Cognni severity to Cortex XSOAR severity

    Converts the Cognni alert severity level ('Low', 'Medium',
    'High', 'Critical') to Cortex XSOAR incident severity (1 to 4)
    for mapping.

    :type severity: ``str``
    :param severity: severity as returned from the Cognni API (str)

    :return: Cortex XSOAR Severity (1 to 4)
    :rtype: ``int``
    """
    # Positional mapping: Low=1 ... Critical=4. An unknown level raises
    # KeyError, matching the original behavior.
    xsoar_severity_by_name = {
        'Low': 1,
        'Medium': 2,
        'High': 3,
        'Critical': 4,
    }
    return xsoar_severity_by_name[severity]
def convert_to_demisto_severity_int(severity: int) -> int:
    """Return the numeric severity unchanged.

    Identity mapping kept for symmetry with
    ``convert_to_demisto_severity``: callers that already hold a numeric
    severity pass through here so both code paths read the same.
    (The previous docstring was copied from the string variant and
    incorrectly described a Low/Medium/High/Critical translation.)

    :type severity: ``int``
    :param severity: numeric severity as returned from the Cognni API

    :return: Cortex XSOAR Severity (1 to 4)
    :rtype: ``int``
    """
    return severity
def arg_to_int(arg: Any, arg_name: str, required: bool = False) -> Optional[int]:
    """Converts an XSOAR argument to a Python int

    This function is used to quickly validate an argument provided to XSOAR
    via ``demisto.args()`` into an ``int`` type. It will throw a ValueError
    if the input is invalid. If the input is None, it will throw a ValueError
    if required is ``True``, or return ``None`` if required is ``False``.

    :type arg: ``Any``
    :param arg: argument to convert

    :type arg_name: ``str``
    :param arg_name: argument name

    :type required: ``bool``
    :param required:
        throws exception if ``True`` and argument provided is None

    :return:
        returns an ``int`` if arg can be converted
        returns ``None`` if arg is ``None`` and required is set to ``False``
        otherwise throws an Exception
    :rtype: ``Optional[int]``
    """

    if arg is None:
        if required is True:
            raise ValueError(f'Missing "{arg_name}"')
        return None
    if isinstance(arg, int):
        return arg
    if isinstance(arg, str):
        # int() instead of str.isdigit() so signed values such as "-5"
        # and "+5" are accepted (isdigit() rejected them).
        try:
            return int(arg)
        except ValueError:
            raise ValueError(f'Invalid number: "{arg_name}"="{arg}"') from None
    raise ValueError(f'Invalid number: "{arg_name}"')
def arg_to_timestamp(arg: Any, arg_name: str, required: bool = False) -> Optional[int]:
    """Converts an XSOAR argument to a timestamp (seconds from epoch)

    This function is used to quickly validate an argument provided to XSOAR
    via ``demisto.args()`` into an ``int`` containing a timestamp (seconds
    since epoch). It will throw a ValueError if the input is invalid.
    If the input is None, it will throw a ValueError if required is ``True``,
    or return ``None`` if required is ``False``.

    :type arg: ``Any``
    :param arg: argument to convert

    :type arg_name: ``str``
    :param arg_name: argument name

    :type required: ``bool``
    :param required:
        throws exception if ``True`` and argument provided is None

    :return:
        returns an ``int`` containing a timestamp (seconds from epoch) if conversion works
        returns ``None`` if arg is ``None`` and required is set to ``False``
        otherwise throws an Exception
    :rtype: ``Optional[int]``
    """

    if arg is None:
        if required is True:
            raise ValueError(f'Missing "{arg_name}"')
        return None

    if isinstance(arg, (int, float)):
        # Already numeric: assume it is a unix timestamp.
        return int(arg)
    if isinstance(arg, str):
        if arg.isdigit():
            # Numeric string: treat as a unix timestamp in seconds.
            return int(arg)
        # Anything else: try to parse as a date/relative expression in UTC.
        parsed = dateparser.parse(arg, settings={'TIMEZONE': 'UTC'})
        if parsed is None:
            raise ValueError(f'Invalid date: {arg_name}')
        return int(parsed.timestamp())
    raise ValueError(f'Invalid date: "{arg_name}"')
def flatten_event_file_items(event: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Expand one Cognni event into one flat record per attached file item.

    Each record copies the event-level fields (id, description, date,
    severity, source application) next to the item-level fields so it can
    be turned into an XSOAR incident directly.

    :param event: a single event dict as returned by Client.fetch_events
    :return: list of flat per-file dicts; empty if the event is falsy or
        has no items (previously a missing 'items' key raised KeyError)
    """
    if not event:
        return []
    items = event.get('items') or []
    return [
        {
            "eventId": event.get('eventId'),
            "fileName": item.get('name'),
            "fileId": item.get('itemId'),
            "name": item.get('name'),
            "eventType": item.get('type'),
            "description": event.get('description'),
            "date": event.get('date'),
            "severity": event.get('severity'),
            "sourceApplication": event.get('sourceApplication'),
        }
        for item in items
    ]
def convert_file_event_to_incident(file_event: Dict[str, Any]) -> Dict[str, Any]:
    """Build an XSOAR incident dict from one flattened file-event record.

    Uses ``.get`` for every field (the original indexed 'description'
    directly and raised KeyError when it was absent, unlike the other
    fields). Severity defaults to 1 (Low) when missing.

    :param file_event: record produced by flatten_event_file_items
    :return: incident dict suitable for demisto.incidents()
    """
    return {
        'name': file_event.get('name'),
        'details': file_event.get('description'),
        'occurred': file_event.get('date'),
        # rawJSON preserves the full source record for mapping in XSOAR.
        'rawJSON': json.dumps(file_event),
        'severity': convert_to_demisto_severity_int(file_event.get('severity', 1)),
    }
def convert_events_to_incidents(events: Iterable[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Flatten each event's file items and convert every record to an incident.

    Uses a nested comprehension instead of ``sum(map(...), [])``, which
    built a new list per concatenation (quadratic in the number of items).

    :param events: events as returned by Client.fetch_events
    :return: list of XSOAR incident dicts (empty for falsy input)
    """
    if not events:
        return []
    return [
        convert_file_event_to_incident(file_event)
        for event in events
        for file_event in flatten_event_file_items(event)
    ]
def find_latest_event(events: Iterable[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
    """Return the event with the most recent 'date' field, or None.

    Dates are compared as epoch timestamps parsed from the event's
    '%Y-%m-%dT%H:%M:%S.000Z' string; on ties the earlier event wins.
    """
    newest_event = None
    newest_ts = 0
    for candidate in events:
        candidate_ts = date_to_timestamp(
            date_str_or_dt=candidate.get('date', ''),
            date_format='%Y-%m-%dT%H:%M:%S.000Z')
        if candidate_ts > newest_ts:
            newest_ts = candidate_ts
            newest_event = candidate
    return newest_event
''' COMMAND FUNCTIONS '''
def test_module(client: Client, api_key: str, first_fetch: int) -> str:
    """Tests API connectivity and authentication'

    Returning 'ok' indicates that the integration works like it is supposed to.
    Connection to the service is successful.
    Raises exceptions if something goes wrong.

    :type client: ``Client``
    :param client: Cognni client to use

    :return: 'ok' if test passed, anything else will fail the test.
    :rtype: ``str``
    """
    problems = []

    # Validate the API key by attempting the key exchange.
    try:
        client.fetch_key(api_key)
    except ValueError:
        problems.append('The api key is invalid')

    # Validate that the first-fetch time converts to a date string.
    try:
        timestamp_to_datestring(timestamp=first_fetch * 1000, date_format="%Y-%m-%d")
    except ValueError:
        problems.append('Incorrect first fetch time format, should be YYYY-MM-DD')

    return 'ok' if not problems else ''.join(problems)
def fetch_incidents(client: Client, last_run: Dict[str, int],
                    first_fetch_time: Optional[int],
                    events_limit: int,
                    min_severity: int
                    ) -> Tuple[Dict[str, int], List[dict]]:
    """This function retrieves new alerts every interval (default is 1 minute).

    This function has to implement the logic of making sure that incidents are
    fetched only once and no incidents are missed. By default it's invoked by
    XSOAR every minute. It will use last_run to save the timestamp of the last
    incident it processed. If last_run is not provided, it should use the
    integration parameter first_fetch_time to determine when to start fetching
    the first time.

    :param events_limit:
    :type client: ``Client``
    :param client: Cognni client to use

    :type last_run: ``Optional[Dict[str, int]]``
    :param last_run:
        A dict with a key containing the latest incident created time we got
        from last fetch

    :type first_fetch_time: ``Optional[int]``
    :param first_fetch_time:
        If last_run is None (first time we are fetching), it contains
        the timestamp in milliseconds on when to start fetching incidents

    :type min_severity: ``str``
    :param min_severity:
        minimum severity of the alert to search for.
        Options are: "Low", "Medium", "High", "Critical"

    :return:
        A tuple containing two elements:
            next_run (``Dict[str, int]``): Contains the timestamp that will be
                    used in ``last_run`` on the next fetch.
            incidents (``List[dict]``): List of incidents that will be created in XSOAR

    :rtype: ``Tuple[Dict[str, int], List[dict]]``
    """
    last_fetch = last_run.get('last_fetch', None)
    is_initial_run = last_run.get('is_initial_run', True)
    offset = last_run.get('offset')

    # First ever run: fall back to the configured first-fetch timestamp.
    if last_fetch is None:
        last_fetch = first_fetch_time
    else:
        last_fetch = int(last_fetch)

    latest_created_time = cast(int, last_fetch)
    # Reset the pagination offset when there is no saved offset, or when a
    # new week has started: the API's 'Week' coordinate buckets events, so
    # once today is Sunday but the last fetched event was not, paging
    # restarts from 0 for the new bucket.
    if offset is None or (
            not is_initial_run
            and datetime.utcnow().isoweekday() == SUNDAY_ISO_WEEKDAY
            and datetime.utcfromtimestamp(latest_created_time).isoweekday() != SUNDAY_ISO_WEEKDAY):
        offset = 0

    events = client.fetch_events(
        events_limit=events_limit,
        offset=offset,
        start_time=timestamp_to_datestring(timestamp=latest_created_time * 1000, is_utc=True),
        min_severity=min_severity
    )

    # Empty page: keep the cursor where it is and clear the initial flag.
    if not events:
        next_run = {'last_fetch': latest_created_time, 'offset': offset, 'is_initial_run': False}
        return next_run, list()

    # Advance last_fetch to the newest event date seen (seconds from ms).
    latest_event = find_latest_event(events)
    if latest_event:
        latest_created_time = int(date_to_timestamp(
            date_str_or_dt=latest_event.get('date', latest_created_time),
            date_format='%Y-%m-%dT%H:%M:%S.000Z'
        ) / 1000)

    incidents = convert_events_to_incidents(events)
    # NOTE(review): is_initial_run is only set to False on an empty fetch,
    # so it stays True while events keep arriving — confirm this is the
    # intended gating for the Sunday offset reset above.
    next_run = {'last_fetch': latest_created_time,
                'offset': offset + len(events),
                'is_initial_run': is_initial_run}
    return next_run, incidents
def get_event_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """cognni-get-event command: Returns a Cognni event

    :type client: ``Client``
    :param client: Cognni client to use

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['event_id']`` alert ID to return

    :return:
        A ``CommandResults`` object that is then passed to ``return_results``,
        that contains an alert

    :rtype: ``CommandResults``
    """
    event_id = args.get('event_id')
    if not event_id:
        raise ValueError('event_id not specified')

    event = client.get_event(event_id=event_id)

    return CommandResults(
        readable_output=tableToMarkdown(f'Cognni event {event_id}', event),
        outputs_prefix='Cognni.event',
        outputs_key_field='id',
        outputs=event,
    )
def fetch_insights_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """cognni-fetch-insights command: list insights above a minimum severity.

    ``args['min_severity']`` defaults to 2 (Medium) when absent.
    """
    minimum = int(args.get('min_severity', 2))
    insights = client.fetch_insights(min_severity=minimum)

    return CommandResults(
        readable_output=tableToMarkdown(
            f'Cognni {len(insights)} insights', insights),
        outputs_prefix='Cognni.insights',
        outputs_key_field='id',
        outputs=insights,
    )
def get_insight_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """cognni-get-insight command: Returns a Cognni insight

    :type client: ``Client``
    :param client: Cognni client to use

    :type args: ``Dict[str, Any]``
    :param args:
        all command arguments, usually passed from ``demisto.args()``.
        ``args['insight_id']`` insight ID to return

    :return:
        A ``CommandResults`` object that is then passed to ``return_results``,
        that contains an insight

    :rtype: ``CommandResults``
    """
    insight_id = args.get('insight_id')
    if not insight_id:
        raise ValueError('insight_id not specified')

    insight = client.get_insight(insight_id=insight_id)

    return CommandResults(
        readable_output=tableToMarkdown(f'Cognni event {insight_id}', insight),
        outputs_prefix='Cognni.insight',
        outputs_key_field='id',
        outputs=insight,
    )
''' MAIN FUNCTION '''
def main() -> None:
    """main function, parses params and runs command functions

    Reads the integration parameters, builds the Client (re-built with an
    auth header after the key exchange), then dispatches on
    demisto.command(). Errors are logged and surfaced via return_error.
    """

    api_key = demisto.params().get('apikey')

    # get the service API url
    base_url = demisto.params()['url']

    verify_certificate = not demisto.params().get('insecure', False)

    # How far back the very first fetch should look (e.g. '3 days').
    first_fetch_time = arg_to_timestamp(
        arg=demisto.params().get('first_fetch', '3 days'),
        arg_name='First fetch time',
        required=True
    )
    assert isinstance(first_fetch_time, int)
    proxy = demisto.params().get('proxy', False)

    demisto.debug(f'Command being called is {demisto.command()}')
    try:
        # Unauthenticated client, sufficient for test-module / key exchange.
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            proxy=proxy)

        if demisto.command() == 'test-module':
            # This is the call made when pressing the integration Test button.
            result = test_module(client, api_key, first_fetch_time)
            return_results(result)
        else:
            # Exchange the API key for a bearer token and rebuild the
            # client with the Authorization header for all other commands.
            fetch_key_res = client.fetch_key(api_key)
            access_token = fetch_key_res['token']
            headers = {
                'Authorization': f'Bearer {access_token}',
                'Content-Type': 'application/json',
            }
            client = Client(
                base_url=base_url,
                verify=verify_certificate,
                headers=headers,
                proxy=proxy)

            if demisto.command() == 'fetch-incidents':
                # NOTE(review): min_severity may be None when the param is
                # unset; convert_to_demisto_severity would then raise
                # KeyError, caught by the handler below — confirm a default
                # is always configured.
                min_severity = demisto.params().get('min_severity', None)
                max_fetch = arg_to_int(
                    arg=demisto.params().get('max_fetch'),
                    arg_name='max_fetch',
                    required=False
                )
                if not max_fetch or max_fetch > MAX_EVENTS_TO_FETCH:
                    max_fetch = MAX_EVENTS_TO_FETCH

                next_run, incidents = fetch_incidents(
                    client=client,
                    events_limit=max_fetch,
                    last_run=demisto.getLastRun(),  # getLastRun() gets the last run dict
                    first_fetch_time=first_fetch_time,
                    min_severity=convert_to_demisto_severity(min_severity)
                )
                demisto.setLastRun(next_run)
                demisto.incidents(incidents)

            elif demisto.command() == 'cognni-get-event':
                return_results(get_event_command(client, demisto.args()))

            elif demisto.command() == 'cognni-fetch-insights':
                return_results(fetch_insights_command(client, demisto.args()))

            elif demisto.command() == 'cognni-get-insight':
                return_results(get_insight_command(client, demisto.args()))

    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc())
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''

# The extra '__builtin__'/'builtins' names cover how the XSOAR server
# executes integration code, where __name__ is not '__main__'.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
|
|
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from urlparse import urljoin
import os
# 3rd party
import mock
import json
from tests.checks.common import AgentCheckTest, Fixtures
# ID
# Fixture identifiers shared by the mocked responses and assertions below.
APP_ID = 'application_1453738555560_0001'
APP_NAME = 'WordCount'
JOB_ID = 'job_1453738555560_0001'
JOB_NAME = 'WordCount'
USER_NAME = 'vagrant'
TASK_ID = 'task_1453738555560_0001_m_000000'
CLUSTER_NAME = 'MapReduceCluster'

# Resource manager URI
RM_URI = 'http://localhost:8088'

# URL Paths
YARN_APPS_PATH = 'ws/v1/cluster/apps'
MAPREDUCE_JOBS_PATH = 'ws/v1/mapreduce/jobs'

# Service Check Names
YARN_SERVICE_CHECK = 'mapreduce.resource_manager.can_connect'
MAPREDUCE_SERVICE_CHECK = 'mapreduce.application_master.can_connect'
def join_url_dir(url, *args):
    '''
    Join a URL with multiple directories
    '''
    result = url
    for segment in args:
        # Normalize to exactly one slash between the base and the segment,
        # then let urljoin append the path component.
        result = urljoin(result.rstrip('/') + '/', segment.lstrip('/'))
    return result
# Service URLs
# YARN apps endpoint filtered to running MapReduce applications.
YARN_APPS_URL = urljoin(RM_URI, YARN_APPS_PATH) + '?states=RUNNING&applicationTypes=MAPREDUCE'
# Application-master endpoints proxied through the resource manager.
MR_JOBS_URL = join_url_dir(RM_URI, 'proxy', APP_ID, MAPREDUCE_JOBS_PATH)
MR_JOB_COUNTERS_URL = join_url_dir(MR_JOBS_URL, JOB_ID, 'counters')
MR_TASKS_URL = join_url_dir(MR_JOBS_URL, JOB_ID, 'tasks')
def requests_get_mock(*args, **kwargs):
    """Stand-in for requests.get that serves canned fixture responses.

    Maps each known service URL (positional arg 0) to a JSON fixture file
    under ./ci and returns a MockResponse wrapping its contents with
    status 200. Unknown URLs return None, preserving the original
    fall-through behavior. Table-driven to avoid four identical branches.
    """

    class MockResponse:
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code

        def json(self):
            return json.loads(self.json_data)

        def raise_for_status(self):
            # Fixtures always represent successful responses.
            return True

    ci_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "ci")

    fixture_by_url = {
        YARN_APPS_URL: 'apps_metrics',
        MR_JOBS_URL: 'job_metrics',
        MR_JOB_COUNTERS_URL: 'job_counter_metrics',
        MR_TASKS_URL: 'task_metrics',
    }

    fixture_name = fixture_by_url.get(args[0])
    if fixture_name is None:
        return None  # explicit: unmatched URLs previously fell through to None

    with open(Fixtures.file(fixture_name, sdk_dir=ci_dir), 'r') as f:
        return MockResponse(f.read(), 200)
class MapReduceCheck(AgentCheckTest):
    """End-to-end test of the mapreduce check against mocked REST endpoints."""

    CHECK_NAME = 'mapreduce'

    # Instance configuration handed to the check.
    MR_CONFIG = {
        'resourcemanager_uri': 'http://localhost:8088',
        'cluster_name': CLUSTER_NAME,
        'collect_task_metrics': 'true'
    }

    # Counters to collect: one general group plus job-specific ones.
    INIT_CONFIG = {
        'general_counters': [
            {
                'counter_group_name': 'org.apache.hadoop.mapreduce.FileSystemCounter',
                'counters': [
                    {'counter_name': 'FILE_BYTES_READ'},
                    {'counter_name': 'FILE_BYTES_WRITTEN'}
                ]
            }
        ],
        'job_specific_counters': [
            {
                'job_name': 'WordCount',
                'metrics': [
                    {
                        'counter_group_name': 'org.apache.hadoop.mapreduce.FileSystemCounter',
                        'counters': [
                            {'counter_name': 'FILE_BYTES_WRITTEN'}
                        ]
                    }, {
                        'counter_group_name': 'org.apache.hadoop.mapreduce.TaskCounter',
                        'counters': [
                            {'counter_name': 'MAP_OUTPUT_RECORDS'}
                        ]
                    }
                ]
            }
        ]
    }

    # Expected values, matching the job_metrics fixture.
    MAPREDUCE_JOB_METRIC_VALUES = {
        'mapreduce.job.elapsed_time.max': 99221829,
        'mapreduce.job.maps_total': 1,
        'mapreduce.job.maps_completed': 0,
        'mapreduce.job.reduces_total': 1,
        'mapreduce.job.reduces_completed': 0,
        'mapreduce.job.maps_pending': 0,
        'mapreduce.job.maps_running': 1,
        'mapreduce.job.reduces_pending': 1,
        'mapreduce.job.reduces_running': 0,
        'mapreduce.job.new_reduce_attempts': 1,
        'mapreduce.job.running_reduce_attempts': 0,
        'mapreduce.job.failed_reduce_attempts': 0,
        'mapreduce.job.killed_reduce_attempts': 0,
        'mapreduce.job.successful_reduce_attempts': 0,
        'mapreduce.job.new_map_attempts': 0,
        'mapreduce.job.running_map_attempts': 1,
        'mapreduce.job.failed_map_attempts': 1,
        'mapreduce.job.killed_map_attempts': 0,
        'mapreduce.job.successful_map_attempts': 0,
    }

    MAPREDUCE_JOB_METRIC_TAGS = [
        'cluster_name:' + CLUSTER_NAME,
        'app_name:' + APP_NAME,
        'job_name:' + JOB_NAME,
        'user_name:' + USER_NAME
    ]

    MAPREDUCE_MAP_TASK_METRIC_VALUES = {
        'mapreduce.job.map.task.elapsed_time.max': 99869037
    }

    MAPREDUCE_MAP_TASK_METRIC_TAGS = [
        'cluster_name:' + CLUSTER_NAME,
        'app_name:' + APP_NAME,
        'job_name:' + JOB_NAME,
        'user_name:' + USER_NAME,
        'task_type:map'
    ]

    MAPREDUCE_REDUCE_TASK_METRIC_VALUES = {
        'mapreduce.job.reduce.task.elapsed_time.max': 123456
    }

    MAPREDUCE_REDUCE_TASK_METRIC_TAGS = [
        'cluster_name:' + CLUSTER_NAME,
        'app_name:' + APP_NAME,
        'job_name:' + JOB_NAME,
        'user_name:' + USER_NAME,
        'task_type:reduce'
    ]

    MAPREDUCE_JOB_COUNTER_METRIC_VALUES_READ = {
        'mapreduce.job.counter.total_counter_value': {'value': 0, 'tags': ['counter_name:file_bytes_read']},
        'mapreduce.job.counter.map_counter_value': {'value': 1, 'tags': ['counter_name:file_bytes_read']},
        'mapreduce.job.counter.reduce_counter_value': {'value': 2, 'tags': ['counter_name:file_bytes_read']},
    }

    MAPREDUCE_JOB_COUNTER_METRIC_VALUES_WRITTEN = {
        'mapreduce.job.counter.total_counter_value': {'value': 3, 'tags': ['counter_name:file_bytes_written']},
        'mapreduce.job.counter.map_counter_value': {'value': 4, 'tags': ['counter_name:file_bytes_written']},
        'mapreduce.job.counter.reduce_counter_value': {'value': 5, 'tags': ['counter_name:file_bytes_written']},
    }

    MAPREDUCE_JOB_COUNTER_METRIC_VALUES_RECORDS = {
        'mapreduce.job.counter.total_counter_value': {'value': 9, 'tags': ['counter_name:map_output_records']},
        'mapreduce.job.counter.map_counter_value': {'value': 10, 'tags': ['counter_name:map_output_records']},
        'mapreduce.job.counter.reduce_counter_value': {'value': 11, 'tags': ['counter_name:map_output_records']},
    }

    MAPREDUCE_JOB_COUNTER_METRIC_TAGS = [
        'cluster_name:' + CLUSTER_NAME,
        'app_name:' + APP_NAME,
        'job_name:' + JOB_NAME,
        'user_name:' + USER_NAME
    ]

    def _assert_counter_metrics(self, counter_metric_values):
        # Helper consolidating the three identical counter-assertion loops.
        # Build the tag list with '+' instead of extend(): the original
        # mutated the class-level fixture lists in place, which would leak
        # duplicate tags into any later use of the same dicts.
        for metric, attributes in counter_metric_values.iteritems():
            self.assertMetric(metric,
                              value=attributes['value'],
                              tags=attributes['tags'] + self.MAPREDUCE_JOB_COUNTER_METRIC_TAGS)

    @mock.patch('requests.get', side_effect=requests_get_mock)
    def test_check(self, mock_requests):
        """Run the check once and assert every metric and service check."""
        config = {
            'instances': [self.MR_CONFIG],
            'init_config': self.INIT_CONFIG
        }

        self.run_check(config)

        # Check the MapReduce job metrics
        for metric, value in self.MAPREDUCE_JOB_METRIC_VALUES.iteritems():
            self.assertMetric(metric,
                              value=value,
                              tags=self.MAPREDUCE_JOB_METRIC_TAGS)

        # Check the map task metrics
        for metric, value in self.MAPREDUCE_MAP_TASK_METRIC_VALUES.iteritems():
            self.assertMetric(metric,
                              value=value,
                              tags=self.MAPREDUCE_MAP_TASK_METRIC_TAGS)

        # Check the reduce task metrics
        for metric, value in self.MAPREDUCE_REDUCE_TASK_METRIC_VALUES.iteritems():
            self.assertMetric(metric,
                              value=value,
                              tags=self.MAPREDUCE_REDUCE_TASK_METRIC_TAGS)

        # Check the MapReduce job counter metrics (read / written / records)
        for counter_values in (self.MAPREDUCE_JOB_COUNTER_METRIC_VALUES_READ,
                               self.MAPREDUCE_JOB_COUNTER_METRIC_VALUES_WRITTEN,
                               self.MAPREDUCE_JOB_COUNTER_METRIC_VALUES_RECORDS):
            self._assert_counter_metrics(counter_values)

        # Check the service checks
        self.assertServiceCheckOK(YARN_SERVICE_CHECK,
                                  tags=['url:http://localhost:8088'])
        self.assertServiceCheckOK(MAPREDUCE_SERVICE_CHECK,
                                  tags=['url:http://localhost:8088'])
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import time
from django.utils.translation import ugettext as _
from desktop.conf import USE_DEFAULT_CONFIGURATION
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import force_unicode
from desktop.lib.rest.http_client import RestException
from desktop.models import DefaultConfiguration
from notebook.data_export import download as spark_download
from notebook.connectors.base import Api, QueryError, SessionExpired, _get_snippet_session
LOG = logging.getLogger(__name__)

# Spark support is optional: if the spark app is not installed the connector
# module still imports, and get_spark_api simply stays undefined.
try:
  from spark.conf import LIVY_SERVER_SESSION_KIND
  from spark.job_server_api import get_api as get_spark_api
except ImportError, e:
  LOG.exception('Spark is not enabled')
class SparkConfiguration(object):
  """Declarative list of the user-configurable Livy/Spark session properties.

  Each PROPERTIES entry describes one field of the session-creation UI;
  ``is_yarn`` marks properties that only apply when sessions run on YARN.
  """

  APP_NAME = 'spark'

  PROPERTIES = [
    {
      "name": "jars",
      "nice_name": _("Jars"),
      "help_text": _("Add one or more JAR files to the list of resources."),
      "type": "csv-hdfs-files",
      "is_yarn": False,
      "multiple": True,
      "defaultValue": [],
      "value": [],
    }, {
      "name": "files",
      "nice_name": _("Files"),
      "help_text": _("Files to be placed in the working directory of each executor."),
      "type": "csv-hdfs-files",
      "is_yarn": False,
      "multiple": True,
      "defaultValue": [],
      "value": [],
    }, {
      "name": "pyFiles",
      "nice_name": _("pyFiles"),
      "help_text": _("Python files to be placed in the working directory of each executor."),
      "type": "csv-hdfs-files",
      "is_yarn": False,
      "multiple": True,
      "defaultValue": [],
      "value": [],
    }, {
      "name": "driverMemory",
      "nice_name": _("Driver Memory"),
      "help_text": _("Amount of memory to use for the driver process in GB. (Default: 1). "),
      "type": "jvm",
      "is_yarn": False,
      "multiple": False,
      "defaultValue": '1G',
      "value": '1G',
    },
    # YARN-only properties
    {
      "name": "driverCores",
      "nice_name": _("Driver Cores"),
      "help_text": _("Number of cores used by the driver, only in cluster mode (Default: 1)"),
      "type": "number",
      "is_yarn": True,
      "multiple": False,
      "defaultValue": 1,
      "value": 1,
    }, {
      "name": "executorMemory",
      "nice_name": _("Executor Memory"),
      "help_text": _("Amount of memory to use per executor process in GB. (Default: 1)"),
      "type": "jvm",
      "is_yarn": True,
      "multiple": False,
      "defaultValue": '1G',
      "value": '1G',
    }, {
      "name": "executorCores",
      # Fixed copy-paste from driverCores: this property sizes the executors,
      # not the driver.
      "nice_name": _("Executor Cores"),
      "help_text": _("Number of cores used by each executor, only in cluster mode (Default: 1)"),
      "type": "number",
      "is_yarn": True,
      "multiple": False,
      "defaultValue": 1,
      "value": 1,
    }, {
      "name": "queue",
      "nice_name": _("Queue"),
      "help_text": _("The YARN queue to submit to, only in cluster mode (Default: default)"),
      "type": "string",
      "is_yarn": True,
      "multiple": False,
      "defaultValue": 'default',
      "value": 'default',
    }, {
      "name": "archives",
      "nice_name": _("Archives"),
      "help_text": _("Archives to be extracted into the working directory of each executor, only in cluster mode."),
      "type": "csv-hdfs-files",
      "is_yarn": True,
      "multiple": True,
      "defaultValue": [],
      "value": [],
    }
  ]
class SparkApi(Api):
  """Notebook connector that executes snippets on Spark through a Livy server.

  NOTE(review): this file is Python 2 (``except Exception, e``); comments
  only were added here, no code was changed.
  """

  # Spark UI address printed in standalone/local session startup logs.
  SPARK_UI_RE = re.compile("Started SparkUI at (http[s]?://([0-9a-zA-Z-_\.]+):(\d+))")
  # YARN tracking-proxy URL printed in yarn-mode session startup logs.
  YARN_JOB_RE = re.compile("tracking URL: (http[s]?://.+/)")
  # Standalone job ids appear in the logs as "Got job <n>".
  STANDALONE_JOB_RE = re.compile("Got job (\d+)")

  @staticmethod
  def get_properties():
    """Return the default configurable session properties."""
    return SparkConfiguration.PROPERTIES

  def create_session(self, lang='scala', properties=None):
    """Create a Livy session of kind *lang* and wait until it is usable.

    Falls back to the user's saved default configuration (if enabled) or to
    the stock property list. Raises QueryError when the session does not
    reach the 'idle' state within ~120 seconds.
    """
    if not properties:
      config = None
      if USE_DEFAULT_CONFIGURATION.get():
        config = DefaultConfiguration.objects.get_configuration_for_user(app='spark', user=self.user)

      if config is not None:
        properties = config.properties_list
      else:
        properties = self.get_properties()

    # Flatten the property descriptors into the kwargs Livy expects.
    props = dict([(p['name'], p['value']) for p in properties]) if properties is not None else {}
    props['kind'] = lang

    api = get_spark_api(self.user)
    response = api.create_session(**props)

    status = api.get_session(response['id'])
    count = 0

    # Poll once per second, at most 120 times, while the session starts up.
    while status['state'] == 'starting' and count < 120:
      status = api.get_session(response['id'])
      count += 1
      time.sleep(1)

    if status['state'] != 'idle':
      info = '\n'.join(status['log']) if status['log'] else 'timeout'
      raise QueryError(_('The Spark session could not be created in the cluster: %s') % info)

    return {
      'type': lang,
      'id': response['id'],
      'properties': properties
    }

  def execute(self, notebook, snippet):
    """Submit the snippet's statement to its Livy session."""
    api = get_spark_api(self.user)
    session = _get_snippet_session(notebook, snippet)

    try:
      response = api.submit_statement(session['id'], snippet['statement'])
      return {
        'id': response['id'],
        'has_result_set': True,
      }
    except Exception, e:
      # A dead/busy session is surfaced as SessionExpired so the frontend
      # can transparently recreate it.
      message = force_unicode(str(e)).lower()
      if 'session not found' in message or 'connection refused' in message or 'session is in state busy' in message:
        raise SessionExpired(e)
      else:
        raise e

  def check_status(self, notebook, snippet):
    """Return the execution state of a previously submitted statement."""
    api = get_spark_api(self.user)
    session = _get_snippet_session(notebook, snippet)
    cell = snippet['result']['handle']['id']

    try:
      response = api.fetch_data(session['id'], cell)
      return {
        'status': response['state'],
      }
    except Exception, e:
      message = force_unicode(str(e)).lower()
      if 'session not found' in message:
        raise SessionExpired(e)
      else:
        raise e

  def fetch_result(self, notebook, snippet, rows, start_over):
    """Fetch a statement's output and normalize it into table/text/image form."""
    api = get_spark_api(self.user)
    session = _get_snippet_session(notebook, snippet)
    cell = snippet['result']['handle']['id']

    try:
      response = api.fetch_data(session['id'], cell)
    except Exception, e:
      message = force_unicode(str(e)).lower()
      if 'session not found' in message:
        raise SessionExpired(e)
      else:
        raise e

    content = response['output']

    if content['status'] == 'ok':
      data = content['data']
      images = []

      # Prefer Livy's structured table payload; otherwise fall back to an
      # optional PNG plus the plain-text representation.
      try:
        table = data['application/vnd.livy.table.v1+json']
      except KeyError:
        try:
          images = [data['image/png']]
        except KeyError:
          images = []
        data = [[data['text/plain']]]
        meta = [{'name': 'Header', 'type': 'STRING_TYPE', 'comment': ''}]
        type = 'text'
      else:
        data = table['data']
        headers = table['headers']
        meta = [{'name': h['name'], 'type': h['type'], 'comment': ''} for h in headers]
        type = 'table'

      # Non start_over not supported
      if not start_over:
        data = []

      return {
        'data': data,
        'images': images,
        'meta': meta,
        'type': type
      }
    elif content['status'] == 'error':
      # Build the most informative message available: full traceback if
      # present, else "ename: evalue".
      tb = content.get('traceback', None)

      if tb is None or not tb:
        msg = content.get('ename', 'unknown error')

        evalue = content.get('evalue')
        if evalue is not None:
          msg = '%s: %s' % (msg, evalue)
      else:
        msg = ''.join(tb)

      raise QueryError(msg)

  def download(self, notebook, snippet, format):
    """Download a statement's result via the spark data_export helper."""
    try:
      api = get_spark_api(self.user)
      session = _get_snippet_session(notebook, snippet)
      cell = snippet['result']['handle']['id']

      return spark_download(api, session['id'], cell, format)
    except Exception, e:
      raise PopupException(e)

  def cancel(self, notebook, snippet):
    """Cancel the snippet's session; always reports success."""
    api = get_spark_api(self.user)
    session = _get_snippet_session(notebook, snippet)
    response = api.cancel(session['id'])

    return {'status': 0}

  def get_log(self, notebook, snippet, startFrom=0, size=None):
    """Return a slice of the Livy session log."""
    api = get_spark_api(self.user)
    session = _get_snippet_session(notebook, snippet)

    return api.get_log(session['id'], startFrom=startFrom, size=size)

  def progress(self, snippet, logs):
    # Livy exposes no progress information; report a fixed 50%.
    return 50

  def close_statement(self, snippet): # Individual statements cannot be closed
    pass

  def close_session(self, session):
    """Close the Livy session; 404/500 from Livy means it is already gone."""
    api = get_spark_api(self.user)

    if session['id'] is not None:
      try:
        api.close(session['id'])
        return {
          'session': session['id'],
          'status': 0
        }
      except RestException, e:
        if e.code == 404 or e.code == 500: # TODO remove the 500
          raise SessionExpired(e)
    else:
      return {'status': -1}

  def get_jobs(self, notebook, snippet, logs):
    """List the Spark jobs visible in the logs (YARN or standalone)."""
    if self._is_yarn_mode():
      # Tracking URL is found at the start of the logs
      start_logs = self.get_log(notebook, snippet, startFrom=0, size=100)
      return self._get_yarn_jobs(start_logs)
    else:
      return self._get_standalone_jobs(logs)

  def _get_standalone_jobs(self, logs):
    """Extract job ids from the logs and link them to the Spark UI."""
    job_ids = set([])

    # Attempt to find Spark UI Host and Port from startup logs
    spark_ui_url = self.SPARK_UI_RE.search(logs)
    if not spark_ui_url:
      LOG.warn('Could not find the Spark UI URL in the session logs.')
      return []
    else:
      spark_ui_url = spark_ui_url.group(1)

    # Standalone/Local mode runs on same host as Livy, attempt to find Job IDs in Spark log
    for match in self.STANDALONE_JOB_RE.finditer(logs):
      job_id = match.group(1)
      job_ids.add(job_id)

    jobs = [{
      'name': job_id,
      'url': '%s/jobs/job/?id=%s' % (spark_ui_url, job_id)
    } for job_id in job_ids]

    return jobs

  def _get_yarn_jobs(self, logs):
    """Extract YARN tracking URLs from the logs; job name is the app id."""
    tracking_urls = set([])

    # YARN mode only outputs the tracking-proxy URL, not Job IDs
    for match in self.YARN_JOB_RE.finditer(logs):
      url = match.group(1)
      tracking_urls.add(url)

    jobs = [{
      'name': url.strip('/').split('/')[-1], # application_id is the last token
      'url': url
    } for url in tracking_urls]

    return jobs

  def _is_yarn_mode(self):
    """True when Livy is configured to launch sessions on YARN."""
    return LIVY_SERVER_SESSION_KIND.get() == "yarn"
|
|
import pygame
from pygame.locals import *
from .const import *
from . import widget
from . import table
from . import basic
from . import pguglobals
from .errors import PguError
# Orientation selectors shared by the _slider implementation below.
_SLIDER_HORIZONTAL = 0
_SLIDER_VERTICAL = 1
class _slider(widget.Widget):
    """Shared implementation behind HSlider and VSlider.

    A draggable bar maps ``value`` into [min, max]. Assigning to ``value``
    (see __setattr__) clamps it into range and sends CHANGE when it changes.
    """
    _value = None

    def __init__(self,value,orient,min,max,size,step=1,**params):
        params.setdefault('cls','slider')
        widget.Widget.__init__(self,**params)
        self.min,self.max,self.value,self.orient,self.size,self.step = min,max,value,orient,size,step
        self.style.check("bar")

    def paint(self,s):
        # Self-assignment re-runs the clamping in __setattr__ in case
        # min/max changed since the value was last set.
        self.value = self.value
        r = pygame.rect.Rect(0,0,self.style.width,self.style.height)
        if self.orient == _SLIDER_HORIZONTAL:
            r.x = (self.value-self.min) * (r.w-self.size) / max(1,self.max-self.min);
            r.w = self.size;
        else:
            r.y = (self.value-self.min) * (r.h-self.size) / max(1,self.max-self.min);
            r.h = self.size;
        self.bar = r
        pguglobals.app.theme.render(s,self.style.bar,r)

    def event(self,e):
        """Handle mouse drag/click and arrow-key events; returns True if used."""
        used = None
        r = pygame.rect.Rect(0,0,self.style.width,self.style.height)
        adj = 0
        if e.type == ENTER: self.repaint()
        elif e.type == EXIT: self.repaint()
        elif e.type == MOUSEBUTTONDOWN:
            if self.bar.collidepoint(e.pos):
                # Start a drag: remember the grab point and value at grab time.
                self.grab = e.pos[0],e.pos[1]
                self.grab_value = self.value
            else:
                # Click outside the bar: jump the bar to the click position.
                x,y,adj = e.pos[0],e.pos[1],1
                self.grab = None
            self.repaint()
        elif e.type == MOUSEBUTTONUP:
            #x,y,adj = e.pos[0],e.pos[1],1
            self.repaint()
        elif e.type == MOUSEMOTION:
            if 1 in e.buttons and self.container.myfocus is self:
                if self.grab != None:
                    # Dragging: move value proportionally to the pointer delta.
                    rel = e.pos[0]-self.grab[0],e.pos[1]-self.grab[1]
                    if self.orient == _SLIDER_HORIZONTAL:
                        d = (r.w - self.size)
                        if d != 0: self.value = self.grab_value + ((self.max-self.min) * rel[0] / d)
                    else:
                        d = (r.h - self.size)
                        if d != 0: self.value = self.grab_value + ((self.max-self.min) * rel[1] / d)
                else:
                    x,y,adj = e.pos[0],e.pos[1],1
        # BUG FIX: was ``e.type is KEYDOWN``; identity comparison on an int
        # only works through CPython's small-int caching. Use equality.
        elif e.type == KEYDOWN:
            if self.orient == _SLIDER_HORIZONTAL and e.key == K_LEFT:
                self.value -= self.step
                used = True
            elif self.orient == _SLIDER_HORIZONTAL and e.key == K_RIGHT:
                self.value += self.step
                used = True
            elif self.orient == _SLIDER_VERTICAL and e.key == K_UP:
                self.value -= self.step
                used = True
            elif self.orient == _SLIDER_VERTICAL and e.key == K_DOWN:
                self.value += self.step
                used = True

        if adj:
            # Jump-click: center the bar under the click position.
            if self.orient == _SLIDER_HORIZONTAL:
                d = self.size/2 - (r.w/(self.max-self.min+1))/2
                self.value = (x-d) * (self.max-self.min) / (r.w-self.size+1) + self.min
            else:
                d = self.size/2 - (r.h/(self.max-self.min+1))/2
                self.value = (y-d) * (self.max-self.min) / (r.h-self.size+1) + self.min

        self.pcls = ""
        if self.container.myhover is self: self.pcls = "hover"
        if (self.container.myfocus is self and 1 in pygame.mouse.get_pressed()): self.pcls = "down"
        return used

    # TODO - replace this with property functions and setters
    def __setattr__(self,k,v):
        if k == 'value':
            # Clamp value into [min, max] before storing it.
            v = int(v)
            v = max(v,self.min)
            v = min(v,self.max)
        _v = self.__dict__.get(k,NOATTR)
        self.__dict__[k]=v
        if k == 'value' and _v != NOATTR and _v != v:
            # Only fire CHANGE for genuine value changes after the first set.
            self.send(CHANGE)
            self.repaint()

        if hasattr(self,'size'):
            # Keep the bar length between the short and long style dimensions.
            sz = min(self.size,max(self.style.width,self.style.height))
            sz = max(sz,min(self.style.width,self.style.height))
            self.__dict__['size'] = sz

        if hasattr(self,'max') and hasattr(self,'min'):
            if self.max < self.min: self.max = self.min
class VSlider(_slider):
    """A vertical slider."""
    def __init__(self,value,min,max,size,step=1,**params):
        """Construct a vertical slider widget.

        Arguments:
            value -- the default position of the slider, between min and max
            min -- the minimum value for the slider
            max -- the maximum value
            size -- the length of the slider bar in pixels
            step -- how much to jump when using the keyboard

        """
        params.setdefault('cls','vslider')
        _slider.__init__(self,value,_SLIDER_VERTICAL,min,max,size,step,**params)
class HSlider(_slider):
    """A horizontal slider."""
    def __init__(self, value, min, max, size, step=1, **params):
        """Construct a horizontal slider widget; arguments mirror VSlider."""
        if 'cls' not in params:
            params['cls'] = 'hslider'
        _slider.__init__(self, value, _SLIDER_HORIZONTAL, min, max, size, step, **params)
class HScrollBar(table.Table):
    """A horizontal scroll bar: minus/plus buttons around an inner _slider."""
    def __init__(self,value,min,max,size,step=1,**params):
        params.setdefault('cls','hscrollbar')
        table.Table.__init__(self,**params)

        # Check that these styles are defined
        self.style.check("minus")
        self.style.check("plus")

        self.slider = _slider(value,_SLIDER_HORIZONTAL,min,max,size,step=step,cls=self.cls+'.slider')
        self.minus = basic.Image(self.style.minus)
        self.minus.connect(MOUSEBUTTONDOWN,self._click,-1)
        # Forward slider CHANGE events to listeners on the scroll bar itself.
        self.slider.connect(CHANGE,self.send,CHANGE)

        # A second minus button that can sit directly next to the slider.
        self.minus2 = basic.Image(self.style.minus)
        self.minus2.connect(MOUSEBUTTONDOWN,self._click,-1)

        self.plus = basic.Image(self.style.plus)
        self.plus.connect(MOUSEBUTTONDOWN,self._click,1)

        self.size = size

    def _click(self,value):
        # Nudge the slider by one step per button click (value is +1 or -1).
        self.slider.value += self.slider.step*value

    def resize(self,width=None,height=None):
        self.clear()
        self.tr()

        w = self.style.width
        h = self.slider.style.height
        ww = 0

        # Only lay out the buttons when there is room for them beside the bar.
        if w > (h*2 + self.minus.style.width+self.plus.style.width):
            self.td(self.minus)
            ww += self.minus.style.width

        self.td(self.slider)

        if w > (h*2 + self.minus.style.width+self.minus2.style.width+self.plus.style.width):
            self.td(self.minus2)
            ww += self.minus2.style.width

        if w > (h*2 + self.minus.style.width+self.plus.style.width):
            self.td(self.plus)
            ww += self.plus.style.width

        #HACK: handle theme sizing properly
        xt,xr,xb,xl = pguglobals.app.theme.getspacing(self.slider)
        ww += xr+xl

        # The slider takes whatever width the buttons and theme spacing leave,
        # and its bar length is scaled down proportionally.
        self.slider.style.width = self.style.width - ww
        setattr(self.slider,'size',self.size * self.slider.style.width / max(1,self.style.width))
        #self.slider.size = self.size * self.slider.style.width / max(1,self.style.width)
        return table.Table.resize(self,width,height)

    # min/max/value/step all proxy straight through to the inner slider.
    @property
    def min(self):
        return self.slider.min
    @min.setter
    def min(self, value):
        self.slider.min = value
    @property
    def max(self):
        return self.slider.max
    @max.setter
    def max(self, value):
        self.slider.max = value
    @property
    def value(self):
        return self.slider.value
    @value.setter
    def value(self, value):
        self.slider.value = value
    @property
    def step(self):
        return self.slider.step
    @step.setter
    def step(self, value):
        self.slider.step = value

    # def __setattr__(self,k,v):
    #     if k in ('min','max','value','step'):
    #         return setattr(self.slider,k,v)
    #     self.__dict__[k]=v
    # def __getattr__(self,k):
    #     if k in ('min','max','value','step'):
    #         return getattr(self.slider,k)
    #     return table.Table.__getattr__(self,k) #self.__dict__[k]
class VScrollBar(table.Table):
    """A vertical scroll bar: minus/plus buttons around an inner _slider."""
    def __init__(self,value,min,max,size,step=1,**params):
        params.setdefault('cls','vscrollbar')
        table.Table.__init__(self,**params)

        # Check that these styles are defined
        self.style.check("minus")
        self.style.check("plus")

        self.minus = basic.Image(self.style.minus)
        self.minus.connect(MOUSEBUTTONDOWN,self._click,-1)

        # A second minus button that can sit directly next to the slider.
        self.minus2 = basic.Image(self.style.minus)
        self.minus2.connect(MOUSEBUTTONDOWN,self._click,-1)

        self.plus = basic.Image(self.style.plus)
        self.plus.connect(MOUSEBUTTONDOWN,self._click,1)

        self.slider = _slider(value,_SLIDER_VERTICAL,min,max,size,step=step,cls=self.cls+'.slider')
        # Forward slider CHANGE events to listeners on the scroll bar itself.
        self.slider.connect(CHANGE,self.send,CHANGE)

        self.size = size

    def _click(self,value):
        # Nudge the slider by one step per button click (value is +1 or -1).
        self.slider.value += self.slider.step*value

    def resize(self,width=None,height=None):
        self.clear()

        h = self.style.height
        w = self.slider.style.width
        hh = 0

        # Only lay out the buttons when there is room above/below the bar.
        if h > (w*2 + self.minus.style.height+self.plus.style.height):
            self.tr()
            self.td(self.minus)
            hh += self.minus.style.height

        self.tr()
        self.td(self.slider)

        if h > (w*2 + self.minus.style.height+self.minus2.style.height+self.plus.style.height):
            self.tr()
            self.td(self.minus2)
            hh += self.minus2.style.height

        if h > (w*2 + self.minus.style.height+self.plus.style.height):
            self.tr()
            self.td(self.plus)
            hh += self.plus.style.height

        #HACK: handle theme sizing properly
        xt,xr,xb,xl = pguglobals.app.theme.getspacing(self.slider)
        hh += xt+xb

        # The slider takes whatever height the buttons and spacing leave.
        self.slider.style.height = self.style.height - hh
        setattr(self.slider,'size',self.size * self.slider.style.height / max(1,self.style.height))
        return table.Table.resize(self,width,height)

    # Unlike HScrollBar (which uses properties), min/max/value/step are
    # proxied to the inner slider through attribute hooks here.
    def __setattr__(self,k,v):
        if k in ('min','max','value','step'):
            return setattr(self.slider,k,v)
        self.__dict__[k]=v
    def __getattr__(self,k):
        if k in ('min','max','value','step'):
            return getattr(self.slider,k)
        return table.Table.__getattr__(self,k)
|
|
#!/usr/bin/env python
# missing tests:
# DONE: vv time offset (needs root)
# DONE: NTP offset is small
# DONE: all 8 disks are present
# DONE: disks are not too full (rtime?)
# DONE: modules are in open state ready to record (dplane status? should work)
# DONE: input stream is sensible
# DONE: GPS PPS offset is not unreasonably large (can happen if GPS unlocked)
# DONE: modules are in a group? - assume this is covered by the other tests
# DONE: (dplane state check) group=open was done before input_stream ? (i.e. input streams armable)
# DONE: mark6 interrupts in /etc/default/mark6
# thermal check? other kernel errors? dmesg -> CPU3: Temperature above threshold, cpu clock throttled (total events = 1595)
# eth3 and eth5 are UP? tcpdump check? dropped packet statistics?
# vv test is impossible right now:
# oper@Mark6-4047:~$ sudo /home/oper/bin/vv -i eth3 -p 4001 -n 5
# [sudo] password for oper:
# Sorry, user oper is not allowed to execute '/home/oper/bin/vv -i eth3 -p 4001 -n 5' as root on localhost.
# oper@Mark6-4047:~$ /home/oper/bin/vv -i eth3 -p 4001 -n 5
# eth3: You don't have permission to capture on that device (socket: Operation not permitted)
import sys
import unittest
import corr
import httplib
import os
import re
import numpy as np
import adc5g
import time
import subprocess
import socket
from datetime import datetime, timedelta
from pkg_resources import parse_version
# Default to verbose unittest output when no CLI flags were given.
if len(sys.argv) == 1:
    sys.argv.append('-v')
def get_switch_ip():
    """Return the RF switch IP: the first double-quoted string in switch_set_IF.py."""
    # Use a context manager so the file handle is closed (original leaked it).
    with open("/usr/local/src/r2dbe/software/switch_set_IF.py") as f:
        code = f.read()
    return code.split('"')[1]
# Site/test configuration: hostnames, pinned software versions and tolerances
# used by the checks below.
r2_hostname = 'r2dbe-1'
input_streams = {'eth3':'12', 'eth5':'34'} # input streams we will test for
switch_ip = get_switch_ip()
m6_software_version = '1.2j'
r2_bitcode_md5sum = '6421249e83aa86a9f2630b2c2ea04d22'
if_power_tol = 12 # deviation of std from ideal in ADC 8bit units
r2_threshold_tol = 4 # deviation of th from ideal in ADC 8bit units
vv_threshold = 0.05 # seconds offset after which to warn for vv packet vs system time
ntp_threshold = 0.05 # seconds offset after which to warn about NTP offset size
gpspps_threshold = 1e-5 + (5e-12 * 86400 * 20) # reasonable PPS drift since lock + GPS scatter

# socket defn to cplane (local control port 14242, resolved once at import)
socket_res = socket.getaddrinfo('127.0.0.1', 14242, socket.AF_INET, socket.SOCK_STREAM)[0]
def cplanecmd(cmd):
    """Send one command to the local cplane control socket; return the stripped reply."""
    af, socktype, proto, canonname, sa = socket_res
    s = socket.socket(af, socktype, proto)
    try:
        s.connect(sa)
        s.sendall(cmd + ';') # extra ';' will not matter
        # NOTE(review): a single recv(8192) assumes the whole reply arrives in
        # one read; holds for these short local replies.
        ret = s.recv(8192).strip()
    finally:
        # Close the socket even when connect/send/recv raises (the original
        # leaked it on any exception).
        s.close()
    return ret
# Refuse to run while a recording is in progress; these checks poke hardware.
recstate = cplanecmd('record?;')
if "recording" in recstate:
    sys.exit('currently recording do not run!')

# Connect to the R2DBE once and grab one 8-bit snapshot plus the quantizer
# threshold for each IF; the test classes below share these.
roach2 = corr.katcp_wrapper.FpgaClient(r2_hostname)
roach2.wait_connected()
x0 = np.array(adc5g.get_snapshot(roach2, 'r2dbe_snap_8bit_0_data'))
x1 = np.array(adc5g.get_snapshot(roach2, 'r2dbe_snap_8bit_1_data'))
th0 = roach2.read_int('r2dbe_quantize_0_thresh')
th1 = roach2.read_int('r2dbe_quantize_1_thresh')
class R2EpochIsCorrect(unittest.TestCase):
    def test(self):
        # VDIF reference epochs advance every half year from 2000-01-01, so
        # the current epoch is 2*(years since 2000), plus 1 in Jul-Dec.
        utcnow = datetime.utcnow()
        epoch = 2*(utcnow.year - 2000) + (utcnow.month > 6)
        # Both IF header registers must carry the current epoch.
        self.assertEqual(epoch, roach2.read_int('r2dbe_vdif_0_hdr_w1_ref_ep'), "epoch set in R2DBE startup script")
        self.assertEqual(epoch, roach2.read_int('r2dbe_vdif_1_hdr_w1_ref_ep'), "epoch set in R2DBE startup script")
# class R2SecondsAreCorrect(unittest.TestCase):
# def test(self):
# utcnow = datetime.utcnow()
# wait = (1500000 - utcnow.microsecond) % 1e6 # get to 0.5s boundary
# time.sleep(wait / 1e6)
# utcnow = datetime.utcnow()
# refdate = datetime(utcnow.year, 1+6*(utcnow.month > 6), 1)
# # print refdate
# dt = utcnow - refdate
# totalsec = dt.days * 86400 + dt.seconds
# gpscnt = roach2.read_uint('r2dbe_onepps_gps_pps_cnt')
# r2sec0 = gpscnt + roach2.read_int('r2dbe_vdif_0_hdr_w0_sec_ref_ep')
# r2sec1 = gpscnt + roach2.read_int('r2dbe_vdif_1_hdr_w0_sec_ref_ep')
# # print r2sec0 - totalsec
# self.assertEqual(totalsec, r2sec0)
# self.assertEqual(totalsec, r2sec1)
class R2BitcodeIsUpToDate(unittest.TestCase):
    def test(self):
        # Deployed .bof bitcode must match the checksum pinned at the top.
        import hashlib
        path = '/srv/roach2_boot/current/boffiles/r2dbe_rev2.bof'
        # Context manager closes the file handle (original left it open).
        with open(path, 'rb') as bof:
            md5 = hashlib.md5(bof.read()).hexdigest()
        self.assertEqual(md5, r2_bitcode_md5sum, "R2DBE bitcode in git repository")
class R2IsConnected(unittest.TestCase):
    def test(self):
        # roach2 was connected at module load; verify the link is still up.
        self.assertTrue(roach2.is_connected(), "Check ethernet connection and MAC addresses in Mark6 dnsmasq")
class R2GpsIncrementedBy1(unittest.TestCase):
    def test(self):
        # Align to mid-second so the two counter reads cannot straddle a PPS tick.
        utcnow = datetime.utcnow()
        wait = (1500000 - utcnow.microsecond) % 1e6 # get to 0.5s boundary
        time.sleep(wait / 1e6)
        gpspps1 = roach2.read_uint('r2dbe_onepps_gps_pps_cnt')
        time.sleep(1.0)
        gpspps2 = roach2.read_uint('r2dbe_onepps_gps_pps_cnt')
        # Exactly one PPS pulse must be counted over the 1 s interval.
        self.assertTrue(gpspps2 == gpspps1 + 1, "Check GPS PPS signal")
class R2ClockIs256MHz(unittest.TestCase):
    def test(self):
        # Two back-to-back reads measure the read overhead (b-a); subtracting
        # it from the 0.5 s interval (c-b) leaves pure clock counts.
        a=roach2.read_uint('sys_clkcounter')
        b=roach2.read_uint('sys_clkcounter')
        time.sleep(0.5)
        c=roach2.read_uint('sys_clkcounter')
        # Unwrap the free-running 32-bit counter if it rolled over mid-test.
        if c < b:
            c += 2**32
        if b < a:
            b += 2**32
            c += 2**32
        # (c-2b+a) = (c-b)-(b-a) counts per 0.5 s; /5e5 converts to MHz.
        clk = (c-2*b+a)/5e5
        # temporary extra diagnostic for this test
        self.assertTrue(abs(clk - 256.) < 10.0, "Check 10 MHz and 2048 clock synth [%.2f]" % clk)
class IFPowerIsGood(unittest.TestCase):
    def test(self):
        # Std of the 8-bit snapshot samples should sit near 35 counts on both IFs.
        self.assertTrue(np.abs(np.std(x0) - 35.) <= if_power_tol and
                        np.abs(np.std(x1) - 35.) <= if_power_tol, "Verify IF power and BDC attenuators")
class IFThresholdIsGood(unittest.TestCase):
    def test(self):
        # The quantizer thresholds should track the measured sample std;
        # alc.py re-derives them from the current IF power.
        self.assertTrue(np.abs(np.std(x0) - th0) <= r2_threshold_tol and
                        np.abs(np.std(x1) - th1) <= r2_threshold_tol, "Run alc.py")
class SwitchIsSetToIF(unittest.TestCase):
    def test(self):
        # The switch should report position 0, the state switch_set_IF.py sets.
        connection = httplib.HTTPConnection(switch_ip,80)
        connection.request("GET","/SWPORT?")
        response = connection.getresponse()
        data = response.read()
        self.assertEqual(int(data), 0, "Run switch_set_IF.py before recording VLBI")
class Mark6SoftwareIsCurrent(unittest.TestCase):
    def test(self):
        # Expected install dir is derived from m6_software_version at the top.
        # Message fixed: original read "nstall new Mark6 software".
        self.assertTrue(os.path.exists('/usr/local/src/Mark6_%s' % m6_software_version), "Install new Mark6 software")
class TimezoneIsUTC(unittest.TestCase):
    def test(self):
        # tzname[1] is the DST zone name; with TZ=UTC it also reads 'UTC'.
        self.assertEqual(time.tzname[1], 'UTC', "Mark6 must be set to UTC")
class Mark6InterruptsCorrect(unittest.TestCase):
    def test(self):
        # First MK6_OPTS line of the daemon defaults file (generator .next()
        # is Python 2 only; raises StopIteration if the line is missing).
        line = (l for l in open('/etc/default/mark6') if l[:8] == 'MK6_OPTS').next()
        # Every capture interface under test must appear in MK6_OPTS.
        self.assertTrue(all(dev in line for dev in input_streams.keys()), "/etc/default/mark6 does not match input_streams defined at top")
class NTPOffsetIsSmall(unittest.TestCase):
    def test(self):
        # Parse `ntpq -pn`: last line, second-to-last column = offset in ms.
        res = subprocess.Popen("ntpq -pn".split(), stdout=subprocess.PIPE)
        offsetms = float(res.communicate()[0].strip().split('\n')[-1].split()[-2])
        # ntp_threshold is in seconds, hence the 1e3 factor.
        self.assertTrue(abs(offsetms) < 1e3*ntp_threshold, "Try to restart NTP and monitor offset")
class GPSPPSOffsetIsSmall(unittest.TestCase):
    def test(self):
        # The offset register counts 256 MHz FPGA clock ticks between GPS PPS
        # and internal PPS; gpspps_threshold is in seconds, hence the scaling.
        noffset = roach2.read_int('r2dbe_onepps_offset')
        self.assertTrue(abs(noffset) < gpspps_threshold * 256.e6, "GPS-vs-PPS offset is large, was the GPS lock good?")
class SASDisksAreAllThere(unittest.TestCase):
    def test(self):
        # Expect 32 SAS targets in the lsscsi transport listing.
        res = subprocess.Popen("lsscsi -t".split(), stdout=subprocess.PIPE)
        ndisk = res.communicate()[0].count("sas:")
        self.assertTrue(ndisk == 32, "Found %d SAS disks not 32" % ndisk)
class SASDisksAreAllMounted(unittest.TestCase):
    def test(self):
        # Each of the 32 disks contributes a data and a meta mount point
        # under /mnt/disks/, so 64 mounts total are expected.
        with open('/proc/mounts') as mounts:
            nmount = mounts.read().count('/mnt/disks/')
        # Fixed message: the original left the %d unfilled (no "% nmount")
        # and said "32" while 64 is what is actually asserted.
        self.assertTrue(nmount == 64, "Found %d mount points not 64 (data+meta)" % nmount)
class EnoughSpaceFor10hrs(unittest.TestCase):
    def test(self):
        # rtime? reports remaining record time at the given data rate;
        # field index 4 of the colon-separated reply is remaining seconds.
        rtime = cplanecmd('rtime?16000;') # 16000 Mbps
        rtimesec = float(rtime.split(':')[4])
        self.assertTrue(rtimesec >= 10*3600, "only %.1f hours left on modules!" % (rtimesec/3600.))
class cplaneIsRunningAndUnder1GB(unittest.TestCase):
    def test(self):
        # ps prints cplane's RSS in kB, or nothing when it is not running;
        # 1e6 kB is roughly 1 GB.
        res = subprocess.Popen("/bin/ps ho rss -C cplane".split(), stdout=subprocess.PIPE)
        out = res.communicate()[0]
        self.assertTrue(out != "" and int(out) < 1e6, "Run /etc/init.d/cplane restart")
class dplaneIsRunning(unittest.TestCase):
    def test(self):
        # Empty ps output means no dplane process exists.
        res = subprocess.Popen("/bin/ps ho rss -C dplane".split(), stdout=subprocess.PIPE)
        out = res.communicate()[0]
        self.assertTrue(out != "", "Run /etc/init.d/dplane start")
class dplaneIsReadyToRecord(unittest.TestCase):
    def test(self):
        # Accept either of the two known-good status words for an armed system.
        ret = cplanecmd('status?;')
        self.assertTrue(ret == '!status?0:0:0x3333301;' or ret == '!status?0:0:0x3333311;')
# will only check them as defined by cplane not dplane
class InputStreamsAreCorrect(unittest.TestCase):
    def test(self):
        streams = cplanecmd('input_stream?;')
        # The tuple unpacking assumes exactly two configured streams; since
        # dict ordering is arbitrary, both orderings of the reply are accepted.
        (s1, s2) = input_streams.items()
        s1s2 = re.match('!input_stream\?0:0.+vdif:8224:50:42:%s.+%s.+vdif:8224:50:42:%s.+%s;' % (s1[0], s1[1], s2[0], s2[1]), streams)
        s2s1 = re.match('!input_stream\?0:0.+vdif:8224:50:42:%s.+%s.+vdif:8224:50:42:%s.+%s;' % (s2[0], s2[1], s1[0], s1[1]), streams)
        self.assertTrue(s1s2 is not None or s2s1 is not None, "input streams not consistent with input_streams defined at top")
class R2PPSNearSystemClock(unittest.TestCase):
    def test(self):
        # vv needs raw packet capture privileges, hence the root requirement
        # (see the header comments about sudo failing for oper).
        if os.getuid() != 0:
            raise OSError("Must be root")
        # The last whitespace-separated token of vv's output is the
        # packet-vs-system time offset in seconds.
        res3 = subprocess.Popen("/home/oper/bin/vv -i eth3 -p 4001 -n 1".split(), stdout=subprocess.PIPE)
        eth3dt = float(res3.communicate()[0][:-2].split()[-1])
        res5 = subprocess.Popen("/home/oper/bin/vv -i eth5 -p 4001 -n 1".split(), stdout=subprocess.PIPE)
        eth5dt = float(res5.communicate()[0][:-2].split()[-1])
        self.assertTrue(abs(eth3dt) < vv_threshold and abs(eth5dt) < vv_threshold,
                        "packet times at (%.3f, %.3f), check GPS lock and NTP" % (eth3dt, eth5dt))
class LastScanCheckOK(unittest.TestCase):
    # Verifies the most recently *completed* scan passed cplane's scan_check.
    def test(self):
        if "pending" in recstate: # if pending look at previous completed scan
            sc = cplanecmd('scan_check?%d;' % (int(recstate.split(':')[3])-1))
        else:
            sc = cplanecmd('scan_check?;')
        if 'unk' in sc:
            raise ValueError('cplane confused about last scan, perhaps record pending: ' + sc)
        self.assertTrue('OK' in sc)
# Run every health-check TestCase in this module when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
|
import logging
import sys
import time
from copy import deepcopy
from datetime import datetime
from re import search
import asyncio
import types
from concurrent.futures import ThreadPoolExecutor
from importlib import import_module
from mbot.config import Config
from mbot.exceptions import ShuttingDown
from mbot.middleware import Middleware
from mbot.state import StateMachine
from mbot.storage import S3, Storage
from mbot.utils.packages import install_package
from typing import Any, Callable, List, Optional # NOQA
# Module-level logger for the bot core.
LOG = logging.getLogger(__name__)
# Number of worker threads used for blocking (non-async) jobs.
EXECUTOR_POOL_SIZE = 10
# Prefer the faster uvloop event loop policy when the package is installed.
try:
    import uvloop
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
    pass
def callback(func: Callable[..., None]) -> Callable[..., None]:
    """Mark *func* as safe to invoke directly from within the event loop.

    The flag is read back via :func:`is_callback`.
    """
    # pylint: disable=protected-access
    setattr(func, '_callback', True)
    return func
def is_callback(func: Callable[..., Any]) -> bool:
    """Return True when *func* was marked with the ``callback`` decorator."""
    return '_callback' in vars(func)
class Bot:
    """Yet another simple bot

    Has simple in memory global state which is available in middlewares

    :param: middlewares: import strings or callables
    :param: state: optional your state object
    :param: process_types: array of event types default is set to all
    :param: extra: extra dictionary which is available in middlewares
                   for example connection strings etc..
    """

    # disable processing for own events
    filter_itself = False

    # NOTE(review): class-level mutable list -- since the class is a
    # singleton (see __new__) all constructions accumulate into this list.
    _backends = []

    # storage engine code -> import string
    possible_storages = {
        's3': 'mbot.storage.S3',
        'local': 'mbot.storage.Storage',
    }

    def __init__(self, backends=None, middlewares=None, state=None,
                 process_types=None, extra=None, state_path=None,
                 storage_engine="local", conf=None,
                 config_path=None, encrypt=True, **kwargs):
        self.process_types = process_types
        # avoid the shared-mutable-default pitfall (was `extra={}`)
        self.extra = extra if extra is not None else {}
        self.conf = conf or Config(data_path=state_path,
                                   config_path=config_path,
                                   encrypt=encrypt,
                                   **kwargs)
        self.conf.init()
        self.conf.init_logging()
        if self.conf.storage['encrypt']:
            self.fernet = self.conf.get_fernet_token()
        # this is passed to all middlewares
        self.kwargs = kwargs
        StorageCls = self.load_thing(self.possible_storages[storage_engine])
        self.storage = StorageCls(self, encrypt=encrypt, **self.kwargs)
        # state persistence: restore saved state, else start fresh
        try:
            self.state = self.storage.restore_state(self.conf.data_path)
            self.conf.init()
            self.conf.init_logging()
        except Exception:  # was a bare except; keep SystemExit/KeyboardInterrupt out
            self.state = StateMachine(self)
        finally:
            self.state.state_path = self.conf.data_path
        # load dynamic parts
        self.load_backends(backends or self.conf.core['backends'])
        self.executor = ThreadPoolExecutor(max_workers=EXECUTOR_POOL_SIZE)
        # initialize pools
        if sys.platform == 'win32':
            self.loop = asyncio.ProactorEventLoop()
        else:
            self.loop = asyncio.get_event_loop()
        self.loop.set_default_executor(self.executor)
        self.loop.set_exception_handler(self._async_exception_handler)

    @callback
    def _async_exception_handler(self, loop, context):
        """Handle all exception inside the core loop."""
        kwargs = {}
        exception = context.get('exception')
        if exception:
            # Do not report on shutting down exceptions.
            if isinstance(exception, ShuttingDown):
                return
            kwargs['exc_info'] = (type(exception), exception,
                                  exception.__traceback__)
        # use error() -- LOG.exception would force exc_info=True and log a
        # bogus "NoneType: None" traceback when no exception is active
        LOG.error("Error doing job: %s", context['message'], **kwargs)

    def load_thing(self, name):
        """Load whatever -- resolve a dotted import string to an object.

        Returns None when the module imports but the attribute is missing;
        raises for non-string input or an unimportable module.
        """
        if isinstance(name, str):
            module = import_module(".".join(name.split(".")[:-1]))
            if module:
                return getattr(module, name.split(".")[-1], None)
        raise Exception("Cannot load %s" % name)

    def load_backends(self, backends=None):
        """Instantiate each configured backend engine and register it."""
        # was `backends=[]` -- mutable default argument
        for m in backends or []:
            path = self.conf.conf[m]['engine']
            BackendCls = self.load_thing(path)
            if BackendCls:
                self._backends.append(BackendCls(m, self, **self.kwargs))

    @callback
    def _async_add_job(self, target: Callable[..., None], *args: Any) -> None:
        """Add a job from within the eventloop.

        This method must be run in the event loop.

        target: target to call.
        args: parameters for method to call.
        """
        if asyncio.iscoroutine(target):
            self.loop.create_task(target)
        elif is_callback(target):
            self.loop.call_soon(target, *args)
        elif asyncio.iscoroutinefunction(target):
            self.loop.create_task(target(*args))
        else:
            self.loop.run_in_executor(None, target, *args)

    # public alias
    add_async_job = _async_add_job

    def run_pending_jobs(self):
        """Run all jobs marked for run"""
        now = datetime.now().replace(microsecond=0)
        for key, job in self.state.jobs.all().items():
            if job.when <= now:
                LOG.debug("Run scheduled job: %s", job)
                job.process(self, now)

    def process_middlewares(self, backend, messages):
        """Feed *messages* through the backend's middleware chain."""
        try:
            backend.process_middlewares(messages)
        except Exception as e:
            LOG.exception(e)

    async def handle_backend(self, backend) -> None:
        """Run loop action on backend.

        Modernized from the removed ``@asyncio.coroutine`` decorator.
        """
        if not backend.connect():
            LOG.info("Connection Failed for %s." % backend)
            return
        while True:
            messages = backend.read()
            self.state.add_new_messages(messages)
            self.add_async_job(self.process_middlewares,
                               backend, messages)
            self.add_async_job(self.run_pending_jobs)
            # BUGFIX: the original called asyncio.sleep(1) without awaiting
            # it, which never slept and busy-looped the event loop
            await asyncio.sleep(1)

    async def async_stop(self, exit_code=0) -> None:
        """Stop BOT and shuts down all threads.

        This method is a coroutine.
        """
        self.executor.shutdown()
        self.exit_code = exit_code
        self.loop.stop()
        self.state.save()

    def run(self, backends=None) -> None:
        """Run Slackbot
        """
        for backend in backends or self._backends:
            if not backend.own_loop:
                self.loop.create_task(self.handle_backend(backend))
            else:
                backend.start_loop()
        # Run forever and catch keyboard interrupt
        try:
            # Block until stopped
            LOG.info("Starting BOT core loop")
            self.loop.run_forever()
        except KeyboardInterrupt:
            for backend in backends or self._backends:
                if backend.own_loop:
                    backend.stop_loop()
            self.loop.create_task(self.async_stop())
            self.loop.run_forever()
        finally:
            self.loop.close()

    def get_backend(self, code):
        """Return the backend whose ``code`` matches, or None."""
        for backend in self._backends:
            if code == backend.code:
                return backend
        return None

    def get_middlewares(self, backends=None):
        """Collect middlewares from the given (or all registered) backends."""
        middlewares = []
        for b in backends or self._backends:
            middlewares += b.middlewares
        return middlewares

    def get_middleware(self, name=None, backends=None):
        """Return the first middleware named *name*, or None."""
        for b in backends or self._backends:
            m = b.get_middleware(name)
            if m:
                return m
        return None

    def __getstate__(self):
        """Drop unpicklable runtime objects before pickling."""
        state = self.__dict__.copy()
        # Remove the unpicklable entries.
        for key in ('loop', 'executor', 'fernet'):
            state.pop(key, None)
        return state

    # singleton instance holder
    _instance = None

    def __new__(cls, *args, **kwargs):
        """A singleton implementation of AppLoader. There can be only one.
        """
        if not cls._instance:
            cls._instance = super().__new__(cls)
        return cls._instance
|
|
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.models import User, Group, Permission, AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from .models import UserPermissionList, \
GroupPermissionList
from .utils import add_permission_to_user, \
add_user_to_group, add_permission_to_group, update_permissions_user, update_user_groups, update_permissions_group
class BackendTest(TestCase):
    """Tests for the nonrel permission backend.

    Exercises user/group permission updates and the authentication entry
    points against ``NonrelPermissionBackend``.
    """

    def setUp(self):
        # Force the nonrel backend for the duration of each test.
        self.old_auth_backends = settings.AUTHENTICATION_BACKENDS
        settings.AUTHENTICATION_BACKENDS = (
            'permission_backend_nonrel.backends.NonrelPermissionBackend',
        )
        User.objects.create_user('test', 'test@example.com', 'test')

    def tearDown(self):
        settings.AUTHENTICATION_BACKENDS = self.old_auth_backends

    def test_update_permissions_user(self):
        """update_permissions_user replaces the user's permission list."""
        content_type = ContentType.objects.get_for_model(User)
        perm = Permission.objects.create(name='test',
                                         content_type=content_type,
                                         codename='test')
        user = User.objects.get(username='test')
        self.assertEqual(user.has_perm('auth.test'), False)

        user = User.objects.get(username='test')
        # add a permission
        update_permissions_user([perm], user)
        self.assertEqual(UserPermissionList.objects.count(), 1)
        pl = UserPermissionList.objects.all()[0]
        self.assertEqual(pl.permission_list,
                         ['%s.%s' % (perm.content_type.app_label,
                                     perm.codename)])
        self.assertEqual(user.has_perm('auth.test'), True)
        self.assertEqual(user.has_perm('auth.test23x'), False)

        # adding a duplicated permission must not create a second entry
        user = User.objects.get(username='test')
        update_permissions_user([perm], user)
        self.assertEqual(UserPermissionList.objects.count(), 1)
        pl = UserPermissionList.objects.all()[0]
        self.assertEqual(pl.permission_list,
                         ['%s.%s' % (perm.content_type.app_label,
                                     perm.codename)])

        # add a list of permissions
        perm1 = Permission.objects.create(name='test1',
                                          content_type=content_type,
                                          codename='test1')
        perm2 = Permission.objects.create(name='test2',
                                          content_type=content_type,
                                          codename='test2')
        user = User.objects.get(username='test')
        self.assertEqual(user.has_perm('auth.test1'), False)
        self.assertEqual(user.has_perm('auth.test2'), False)
        user = User.objects.get(username='test')
        update_permissions_user([perm1, perm2, perm], user)
        self.assertEqual(user.has_perm('auth.test1'), True)
        self.assertEqual(user.has_perm('auth.test2'), True)
        self.assertEqual(user.has_perm('auth.test'), True)
        self.assertEqual(user.has_perm('auth.test23x'), False)

        # shrinking the list back revokes the dropped permissions
        user = User.objects.get(username='test')
        update_permissions_user([perm], user)
        self.assertEqual(user.has_perm('auth.test1'), False)
        self.assertEqual(user.has_perm('auth.test2'), False)
        self.assertEqual(user.has_perm('auth.test'), True)
        self.assertEqual(user.has_perm('auth.test23x'), False)

        # remove all permissions
        user = User.objects.get(username='test')
        update_permissions_user([], user)
        self.assertEqual(UserPermissionList.objects.count(), 1)
        pl = UserPermissionList.objects.all()[0]
        self.assertEqual(pl.permission_list, [])
        self.assertEqual(user.has_perm('auth.test'), False)
        self.assertEqual(user.has_perm('auth.test1'), False)
        self.assertEqual(user.has_perm('auth.test2'), False)

    def test_add_user_to_group(self):
        """Assigning groups creates the user's permission-list record."""
        user = User.objects.get(username='test')
        group = Group.objects.create(name='test_group')
        update_user_groups(user, [group])
        self.assertEqual(UserPermissionList.objects.count(), 1)
        self.assertNotEqual(UserPermissionList.objects.all()[0], None)

    def test_update_permissions_group(self):
        """Group permission changes propagate to member users."""
        content_type = ContentType.objects.get_for_model(Group)
        perm = Permission.objects.create(name='test',
                                         content_type=content_type,
                                         codename='test')
        user = User.objects.get(username='test')
        self.assertEqual(user.has_perm('auth.test'), False)

        user = User.objects.get(username='test')
        group = Group.objects.create(name='test_group')
        add_user_to_group(user, group)
        update_permissions_group([perm], group)
        self.assertEqual(GroupPermissionList.objects.count(), 1)
        gl = GroupPermissionList.objects.all()[0]
        self.assertEqual(gl.permission_list,
                         ['%s.%s' % (perm.content_type.app_label,
                                     perm.codename)])
        self.assertEqual(user.has_perm('auth.test'), True)
        self.assertEqual(user.has_perm('auth.test2312'), False)

        group1 = Group.objects.create(name='test_group1')
        perm1 = Permission.objects.create(name='test1',
                                          content_type=content_type,
                                          codename='test1')
        add_user_to_group(user, group1)
        update_permissions_group([perm1], group1)
        user = User.objects.get(username='test')
        self.assertEqual(user.has_perm('auth.test'), True)
        self.assertEqual(user.has_perm('auth.test1'), True)

        # emptying group1's permissions revokes auth.test1 only
        update_permissions_group([], group1)
        user = User.objects.get(username='test')
        self.assertEqual(user.has_perm('auth.test'), True)
        self.assertEqual(user.has_perm('auth.test1'), False)

        # membership in group1 alone grants nothing anymore
        update_user_groups(user, [group1])
        user = User.objects.get(username='test')
        self.assertEqual(user.has_perm('auth.test'), False)
        self.assertEqual(user.has_perm('auth.test1'), False)

    def test_has_perm(self):
        """Superuser status grants everything; inactive users get nothing."""
        user = User.objects.get(username='test')
        self.assertEqual(user.has_perm('auth.test'), False)
        user.is_staff = True
        user.save()
        self.assertEqual(user.has_perm('auth.test'), False)
        user.is_superuser = True
        user.save()
        self.assertEqual(user.has_perm('auth.test'), True)
        user.is_staff = False
        user.is_superuser = False
        user.save()
        self.assertEqual(user.has_perm('auth.test'), False)
        user.is_staff = True
        user.is_superuser = True
        user.is_active = False
        user.save()
        self.assertEqual(user.has_perm('auth.test'), False)

    def test_custom_perms(self):
        """get_all_permissions/get_group_permissions see list-backed perms."""
        user = User.objects.get(username='test')
        content_type = ContentType.objects.get_for_model(Permission)
        perm = Permission.objects.create(name='test',
                                         content_type=content_type,
                                         codename='test')
        # default django way (ManyToManyField)
        #user.user_permissions.add(perm)
        add_permission_to_user(perm, user)
        # reloading user to purge the _perm_cache
        user = User.objects.get(username='test')
        self.assertEqual(user.get_all_permissions(), set([u'auth.test']))
        self.assertEqual(user.get_group_permissions(), set([]))
        self.assertEqual(user.has_module_perms('Group'), False)
        self.assertEqual(user.has_module_perms('auth'), True)

        perm = Permission.objects.create(name='test2',
                                         content_type=content_type,
                                         codename='test2')
        # default django way (ManyToManyField)
        #user.user_permissions.add(perm)
        add_permission_to_user(perm, user)
        perm = Permission.objects.create(name='test3',
                                         content_type=content_type,
                                         codename='test3')
        # default django way (ManyToManyField)
        #user.user_permissions.add(perm)
        add_permission_to_user(perm, user)
        user = User.objects.get(username='test')
        self.assertEqual(user.get_all_permissions(),
                         set([u'auth.test2', u'auth.test', u'auth.test3']))
        self.assertEqual(user.has_perm('test'), False)
        self.assertEqual(user.has_perm('auth.test'), True)
        self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True)

        perm = Permission.objects.create(name='test_group',
                                         content_type=content_type,
                                         codename='test_group')
        group = Group.objects.create(name='test_group')
        # default django way (ManyToManyField)
        #group.permissions.add(perm)
        add_permission_to_group(perm, group)
        # default django way (ManyToManyField)
        #user.groups.add(group)
        add_user_to_group(user, group)
        user = User.objects.get(username='test')
        exp = set([u'auth.test2', u'auth.test',
                   u'auth.test3', u'auth.test_group'])
        self.assertEqual(user.get_all_permissions(), exp)
        self.assertEqual(user.get_group_permissions(),
                         set([u'auth.test_group']))
        self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']),
                         True)

        # anonymous users never have custom permissions
        user = AnonymousUser()
        self.assertEqual(user.has_perm('test'), False)
        self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False)

    def test_has_no_object_perm(self):
        """Regressiontest for #12462: object-level checks always deny."""
        user = User.objects.get(username='test')
        content_type = ContentType.objects.get_for_model(Group)
        content_type.save()
        perm = Permission.objects.create(name='test',
                                         content_type=content_type,
                                         codename='test')
        # default django way (ManyToManyField)
        #user.user_permissions.add(perm)
        add_permission_to_user(perm, user)

        self.assertEqual(user.has_perm('auth.test', 'object'), False)
        self.assertEqual(user.get_all_permissions('object'), set([]))
        self.assertEqual(user.has_perm('auth.test'), True)
        self.assertEqual(user.get_all_permissions(), set(['auth.test']))

    def test_authenticate(self):
        """authenticate() accepts the right password and rejects wrong ones."""
        user = User.objects.get(username='test')
        # assertEquals is a deprecated alias; use assertEqual
        self.assertEqual(authenticate(username='test', password='test'), user)
        self.assertEqual(authenticate(username='test', password='testNones'),
                         None)
|
|
import threading
from copy import deepcopy
import numpy as np
import cv2
from cv2 import cv
from cv_bridge import CvBridge
import rospy
import baxter_interface
from geometry_msgs.msg import (
PolygonStamped,
)
from sensor_msgs.msg import (
Image,
)
from std_msgs.msg import (
String,
)
class LPVision(object):
def __init__(self, limb):
self._side = limb
self._camera_name = self._side + "_hand_camera"
print ("Opening " + self._camera_name + "...")
self._camera = baxter_interface.CameraController(self._camera_name)
self._camera.open()
self._camera.resolution = [640, 400]
self._camera.gain = 20
self._side_roi = 400
self._side_other_roi = 50
self._no_squares = 8
# 400 / 8 = 50
self._square_side_roi = self._side_roi / self._no_squares
self.grid = [[0 for _i in range(self._no_squares)]
for _j in range(self._no_squares)]
print self.grid
self.cv_image = None
self._bridge = CvBridge()
self.red_sample = ()
self.blue_sample = ()
self.reds = None
self.blues = None
self._roi_points = [[100, 200],
[200, 200],
[200, 100],
[100, 100]] # magic (clockwise)
self._other_roi_points = [[375, 400],
[425, 400],
[425, 350],
[375, 350]] # magic (clockwise)
self._is_pickable = False
self._roi_move = False
self._point_selected = -1
self._gain_slider = 30
self._high_colour_slider = 22
self._low_colour_slider = 2
self._low_colour = np.array([self._low_colour_slider, 50, 50])
self._high_colour = np.array([self._high_colour_slider, 255, 255])
self._inrange_colour_thresh = 30
# self._yellow_thresh = 100
self._slider_time = rospy.Time.now()
self._gain_set = False
self._text = ['X', 'Y', 'R', 'G', 'B']
self._pixel = {}
for label in self._text:
self._pixel[label] = 0.0
self._vector = {}
self._grid = [[0 for _i in range(self._no_squares)]
for _j in range(self._no_squares)]
self._pnts = [[0 for i in range(self._square_side_roi + 1)]
for j in range(self._square_side_roi + 1)]
self._pieces_positions = []
self._np_image = np.zeros((self._side_roi, self._side_roi, 3),
np.uint8)
self._image_grid = np.zeros((self._side_roi, self._side_roi, 3),
np.uint8)
self._inrange_colour = np.zeros((self._side_roi, self._side_roi),
np.uint8)
self._projected = np.zeros((self._side_roi, self._side_roi, 3),
np.uint8)
self._other_projected = np.zeros((self._side_other_roi,
self._side_other_roi,
3),
np.uint8)
self.subLock = threading.Lock()
camera_topic = '/cameras/' + self._camera_name + '/image'
_camera_sub = rospy.Subscriber(
camera_topic,
Image,
self._on_camera)
roi_topic = '/learn_play/localize/grid_pixels'
_roi_sub = rospy.Subscriber(
roi_topic,
PolygonStamped,
self._on_roi)
board_state_topic = '/vision/learn_play_state'
self._board_state_pub = rospy.Publisher(
board_state_topic,
String)
rest = {'right_e0': 1.3042671634094238,
'right_e1': 0.9602719721191407,
'right_s0': -0.5361262847534181,
'right_s1': -0.3535825712036133,
'right_w0': -1.1048496612121583,
'right_w1': 1.54203418526001,
'right_w2': -0.5004612314758301}
self._other_arm = baxter_interface.Limb("right")
self._other_arm.move_to_joint_positions(rest)
print ' - All set - '
print " - - - - - - - "
self._process_images()
def _process_images(self):
print " - Starting to process images! - "
while not rospy.is_shutdown():
t = rospy.Time.now()
delta = rospy.Duration(2.0)
# gain changed from slider settled
if (t - self._slider_time > delta and self._gain_set):
self._gain_set = False
print 'Setting GAIN!'
self._camera.gain = self._gain_slider
# process red/yellow image
self._show_image()
self._project_roi()
# self._filter_yellow()
self._filter_red()
self._process_colors(deepcopy(self._inrange_colour))
self._update_image_grid()
self._is_pickable = self._project_other_roi()
# publish state
self._pub_state()
rospy.sleep(0.1)
def _show_image(self):
self.subLock.acquire(True)
local_image = deepcopy(self._np_image)
self.subLock.release()
# draw circles
for idx, points in enumerate(self._roi_points):
cv2.circle(local_image, (points[0], points[1]), 5, (255, 0, 0), 2)
# draw green lines
cv2.polylines(local_image, np.int32([np.array(self._roi_points)]),
1, (0, 255, 0), 2)
cv2.polylines(local_image, np.int32([np.array(
self._other_roi_points)]),
1, (0, 255, 0), 2)
cv.ShowImage("Learn Play game RGB", cv.fromarray(local_image))
cv.SetMouseCallback("Learn Play game RGB", self._on_mouse_click, 0)
cv.CreateTrackbar("Gain", "Learn Play game RGB", self._gain_slider,
100, self._on_gain_slider)
cv.CreateTrackbar("Red Threshold", "Learn Play game RGB",
self._inrange_colour_thresh, 500,
self._on_red_slider)
cv.CreateTrackbar("High red", "Learn Play game RGB",
self._high_colour_slider,
40, self._on_high_colour_slider)
cv.CreateTrackbar("Low red", "Learn Play game RGB",
self._low_colour_slider,
40, self._on_low_colour_slider)
cv.WaitKey(3)
def _project_roi(self):
warped_in = np.float32([np.array(self._roi_points)])
project_out = np.float32([[0, 0],
[self._side_roi, 0],
[self._side_roi, self._side_roi],
[0, self._side_roi]])
M = cv2.getPerspectiveTransform(warped_in, project_out)
self.subLock.acquire(True)
local_image = deepcopy(self._np_image)
self.subLock.release()
self._projected = cv2.warpPerspective(local_image,
M,
(self._side_roi,
self._side_roi))
self._blurred = cv2.GaussianBlur(self._projected, (0, 0), 3)
self._projected = cv2.addWeighted(self._projected,
1.5,
self._blurred,
-0.5,
0,
self._projected)
def _filter_red(self):
"""
Finds red colors in HSV space
"""
hsv = cv2.cvtColor(self._projected, cv2.COLOR_BGR2HSV)
self._inrange_colour = cv2.inRange(hsv, self._low_colour,
self._high_colour)
cv.ShowImage('Orange', cv.fromarray(self._inrange_colour))
def _process_colors(self, red):
# look down each column building up from bottom
self._grid = [[0 for _i in range(self._no_squares)]
for _j in range(self._no_squares)]
self._image_grid = deepcopy(self._projected)
self._pieces_positions = []
for col in xrange(self._no_squares):
cur_row = True
x_offset = self._square_side_roi * col
# Look from the bottom up checking if piece is there
for row in xrange(self._no_squares):
if cur_row: # runs first time
y_offset = self._square_side_roi * row
# print "y = ", y_offset
red_cnt = 0
# look though each pixel in current grid location
if len(red) != self._side_roi:
print 'BAILING - IMAGE SIZE IS UNEXPECTED'
return
for y in xrange(0, self._square_side_roi, 2):
for x in xrange(0, self._square_side_roi, 2):
if red[y + y_offset, x + x_offset] == 255:
red_cnt += 1
if red_cnt > self._inrange_colour_thresh: # Speed tweak
cv2.putText(self._image_grid,
'o',
(x_offset + 20, y_offset + 40),
cv2.FONT_HERSHEY_COMPLEX_SMALL,
2,
(0, 0, 255))
self._grid[row][col] = 1
self._pieces_positions.append((row, col))
break
if red_cnt > self._inrange_colour_thresh:
break # (sigh)
def _update_image_grid(self):
for idx in xrange(1, self._no_squares):
cv2.line(self._image_grid,
(self._square_side_roi * idx, 0),
(self._square_side_roi * idx, self._side_roi),
(0, 255, 0),
1)
cv2.line(self._image_grid,
(0, self._square_side_roi * idx),
(self._side_roi, self._square_side_roi * idx),
(0, 255, 0),
1)
cv2.line(self._image_grid,
(self._square_side_roi * self._no_squares, 0),
(self._square_side_roi * self._no_squares,
self._side_roi),
(0, 255, 0),
1)
cv.ShowImage('Board State', cv.fromarray(self._image_grid))
def _transform_positions(self, positions):
return [(y, 7 - x) for (x, y) in positions]
def _pub_state(self):
state = dict()
self._pieces_positions = self._transform_positions(
self._pieces_positions)
print rospy.Time.now(), " - ", self._pieces_positions
state['baxter_count'] = self._pieces_positions
# state['user_count'] = self._user_cnt
state['board'] = self._grid
state['picking_state'] = self._is_pickable
self._board_state_pub.publish(str(state))
def _on_roi(self, data):
if data.polygon.points:
for idx, point in enumerate(data.polygon.points):
self._roi_points[3 - idx] = [int(point.x), int(point.y)]
def _on_camera(self, data):
try:
self.cv_image = self._bridge.imgmsg_to_cv(data, "bgr8")
local_image = np.asarray(self.cv_image)
except Exception:
print 'Cannot get image from Baxter'
self.subLock.acquire(True)
self._np_image = deepcopy(local_image)
self.subLock.release()
def _on_gain_slider(self, pos):
self._gain_slider = pos
self._gain_set = True
self._slider_time = rospy.Time.now()
def _on_red_slider(self, pos):
self._inrange_colour_thresh = pos
def _on_high_colour_slider(self, pos):
self._high_colour_slider = pos
self._high_colour = np.array([self._high_colour_slider, 255, 255])
def _on_low_colour_slider(self, pos):
self._low_colour_slider = pos
self._low_colour = np.array([self._low_colour_slider, 50, 50])
def _on_mouse_click(self, event, x, y, flags, param):
if event == cv.CV_EVENT_LBUTTONDOWN:
width, height = cv.GetSize(self.cv_image)
for idx, points in enumerate(self._roi_points):
if (x <= points[0] + 5 and
x >= points[0] - 5 and
y <= points[1] + 5 and
y >= points[1] - 5):
self._roi_move = True
self._point_selected = idx
elif event == cv.CV_EVENT_MOUSEMOVE and self._roi_move:
self._roi_points[self._point_selected] = [x, y]
elif event == cv.CV_EVENT_LBUTTONUP and self._roi_move:
self._roi_move = False
def check_for_pick(self):
"""
Returns Bool
"""
local_image = deepcopy(self._np_image)
def _project_other_roi(self):
warped_in = np.float32([np.array(self._other_roi_points)])
project_out = np.float32([[0, 0],
[self._side_other_roi, 0],
[self._side_other_roi, self._side_other_roi],
[0, self._side_other_roi]])
M = cv2.getPerspectiveTransform(warped_in, project_out)
self.subLock.acquire(True)
local_image = deepcopy(self._np_image)
self.subLock.release()
self._other_projected = cv2.warpPerspective(local_image,
M,
(self._side_other_roi,
self._side_other_roi))
# Finds red colors in HSV space
hsv = cv2.cvtColor(self._other_projected, cv2.COLOR_BGR2HSV)
self._inrange_colour = cv2.inRange(hsv, self._low_colour,
self._high_colour)
cv.ShowImage('Colour', cv.fromarray(self._inrange_colour))
# the following can probably be optimized
red_cnt = 0
for x in range(self._side_other_roi):
for y in range(self._side_other_roi):
if red_cnt > self._inrange_colour_thresh: # Speed tweak
return True
else:
if self._inrange_colour[x, y] == 255:
red_cnt += 1
return False
|
|
import os
import random
import storjnode
from storjnode.common import STORJ_HOME
# Default maximum shard size in bytes (128 MiB).
DEFAULT_SHARD_SIZE = 1024 * 1024 * 128  # 128M
# Default on-disk location of the shard store.
DEFAULT_STORE_PATH = os.path.join(STORJ_HOME, "store")
DEFAULT_STORE_CONFIG = {
    DEFAULT_STORE_PATH: {"limit": 0, "use_folder_tree": False}
}
_log = storjnode.log.getLogger(__name__)
# Keep a reference to the builtin before it is shadowed by open() below.
_builtin_open = open
def _get_shard_path(store_path, shard_id, use_folder_tree,
create_needed_folders=False):
if use_folder_tree:
folders = os.path.join(*storjnode.util.chunks(shard_id, 3))
store_path = os.path.join(store_path, folders)
if create_needed_folders:
storjnode.util.ensure_path_exists(store_path)
return os.path.join(store_path, shard_id)
def setup(store_config=None):
    """Setup store so it can be use to store shards.

    This will validate the store paths and create any needed directories.

    Args:
        store_config: Dict of storage paths to optional attributes.
                      limit: The dir size limit in bytes, 0 for no limit.
                      use_folder_tree: Files organized in a folder tree
                                       (always on for fat partitions).

    Returns:
        The normalized store_config with any missing attributes added.

    Raises:
        AssertionError: If input not valid.

    Example:
        import storjnode
        store_config = {
            "path/alpha": {"limit": 0, "use_folder_tree": False},
            "path/beta": {"limit": 2**32, "use_folder_tree": True}
        }
        normalized_paths = storjnode.storage.store.setup(store_config)
    """
    normal_paths = {}
    store_config = store_config or DEFAULT_STORE_CONFIG
    for path, attributes in store_config.items():
        attributes = attributes or {}  # None allowed
        # check path
        path = os.path.realpath(path)
        storjnode.util.ensure_path_exists(path)
        # check limit (NOTE: `long` means this module targets Python 2)
        limit = attributes.get("limit", 0)
        assert(isinstance(limit, int) or isinstance(limit, long))
        assert(limit >= 0)
        free = storjnode.util.get_free_space(path)
        used = storjnode.util.get_folder_size(path)
        available = (free + used)
        if limit > available:
            msg = ("Invalid storage limit for {0}: {1} > available {2}. "
                   "Using available {2}!")
            _log.warning(msg.format(path, limit, available))
            limit = available  # set to available if to large
        # check use_folder_tree (forced on for FAT partitions)
        use_folder_tree = attributes.get("use_folder_tree", False)
        if not use_folder_tree and storjnode.util.get_fs_type(path) == "vfat":
            use_folder_tree = True  # pragma: no cover
        normal_paths[path] = {
            "use_folder_tree": use_folder_tree, "limit": limit
        }
        # NOTE(review): msg below is built but unused -- the matching
        # _log.info call is commented out.
        msg = "Storing data in '{0}' with a capacity of {1}bytes!"
        # _log.info(msg.format(path, limit or available))
    return normal_paths
def open(store_config, shard_id):
    """Retreives a shard from storage.

    Args:
        store_config: Dict of storage paths to optional attributes.
                      limit: The dir size limit in bytes, 0 for no limit.
                      use_folder_tree: Files organized in a folder tree
                                       (always on for fat partitions).
        shard_id: Id of the shard to retreive.

    Returns:
        A read only file object for the shard, the caller is responsable
        for closing the file object.

    Raises:
        KeyError: If shard was not found.
        AssertionError: If input not valid.

    Example:
        import storjnode
        id = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"
        store_config = {"path/alpha": None, "path/beta": None}
        with storjnode.storage.store.open(store_config, id) as shard:
            print(storjnode.storage.shard.get_id(shard))
    """
    shard_path = find(store_config, shard_id)
    if shard_path is None:
        raise KeyError("Shard {0} not found!".format(shard_id))
    # use the saved builtin since this function shadows open()
    return _builtin_open(shard_path, "rb")
def capacity(store_config):
    """ Get the total, used and free capacity of the store.

    Args:
        store_config: Dict of storage paths to optional attributes.
                      limit: The dir size limit in bytes, 0 for no limit.
                      use_folder_tree: Files organized in a folder tree
                                       (always on for fat partitions).

    Returns:
        {"total": int, "used": int, "free": int}

    Raises:
        AssertionError: If input not valid.

    Example:
        import storjnode
        store_config = {"path/alpha": None, "path/beta": None}
        print(storjnode.storage.manager.capacity(store_config))
    """
    store_config = setup(store_config=store_config)  # setup if needed
    total, used, free = 0, 0, 0
    # FIXME doesn't give correct total if multiple paths on same drive
    for store_path, attributes in store_config.items():
        free_disc_space = storjnode.util.get_free_space(store_path)
        limit = attributes["limit"] or free_disc_space
        path_used = storjnode.util.get_folder_size(store_path)
        total += limit
        used += path_used
        # BUGFIX: was `limit - used`, which subtracted the *accumulated*
        # usage of all paths seen so far instead of this path's own usage,
        # understating free space whenever there is more than one path.
        free += limit - path_used
    return {"total": total, "used": used, "free": free}
def add(store_config, shard):
    """ Add a shard to the storage.

    Args:
        store_config: Dict of storage paths to optional attributes.
                      limit: The dir size limit in bytes, 0 for no limit.
                      use_folder_tree: Files organized in a folder tree
                                       (always on for fat partitions).
        shard: A file like object representing the shard.

    Returns:
        Path to the added shard.

    Raises:
        MemoryError: If note enough storage to add shard.
        AssertionError: If input not valid.

    Example:
        import storjnode
        store_config = {"path/a": None, "path/b": None}
        with open("path/to/loose/shard", "rb") as shard:
            storjnode.storage.store.add(store_config, shard)
    """
    store_config = setup(store_config=store_config)  # setup if needed
    shard_id = storjnode.storage.shard.get_id(shard)
    shard_size = storjnode.storage.shard.get_size(shard)

    # check if already in storage
    shard_path = find(store_config, shard_id)
    if shard_path is not None:
        return shard_path

    # shuffle store paths to spread shards somewhat evenly
    # BUGFIX: materialize to a list -- on Python 3 dict.items() is a view
    # and random.shuffle() raises TypeError (identical behavior on py2)
    items = list(store_config.items())
    random.shuffle(items)
    for store_path, attributes in items:

        # check if store path limit reached
        limit = attributes["limit"]
        used = storjnode.util.get_folder_size(store_path)
        free = limit - used
        if limit > 0 and shard_size > free:
            msg = ("Store path limit reached for {3} cannot add {0}: "
                   "Required {1} > {2} free.")
            _log.warning(msg.format(shard_id, shard_size,
                                    free, store_path))
            continue  # try next storepath

        # check if enough free disc space
        free_space = storjnode.util.get_free_space(store_path)
        if shard_size > free_space:
            msg = ("Not enough disc space in {3} to add {0}: "
                   "Required {1} > {2} free.")
            msg = msg.format(shard_id, shard_size, free_space, store_path)
            _log.warning(msg)
            continue  # try next storepath

        # save shard
        use_folder_tree = attributes["use_folder_tree"]
        shard_path = _get_shard_path(store_path, shard_id, use_folder_tree,
                                     create_needed_folders=True)
        storjnode.storage.shard.save(shard, shard_path)
        return shard_path

    raise MemoryError("Not enough space to add {0}!".format(shard_id))
def remove(store_config, shard_id):
    """Remove a shard from the store.

    Args:
        store_config: Dict of storage paths to optional attributes.
                      limit: The dir size limit in bytes, 0 for no limit.
                      use_folder_tree: Files organized in a folder tree
                                      (always on for fat partitions).
        shard_id: Id of the shard to be removed.

    Raises:
        AssertionError: If input not valid.

    Example:
        import storjnode
        id = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"
        store_config = {"path/alpha": None, "path/beta": None}
        storjnode.storage.store.remove(store_config, id)
    """
    located_path = find(store_config, shard_id)
    if located_path is None:
        return None  # nothing to do: shard not present in any store path
    return os.remove(located_path)
def find(store_config, shard_id):
    """Find the path of a shard.

    Args:
        store_config: Dict of storage paths to optional attributes.
                      limit: The dir size limit in bytes, 0 for no limit.
                      use_folder_tree: Files organized in a folder tree
                                      (always on for fat partitions).
        shard_id: Id of the shard to find.

    Returns:
        Path to the shard or None if not found.

    Raises:
        AssertionError: If input not valid.

    Example:
        import storjnode
        id = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"
        store_config = {"path/alpha": None, "path/beta": None}
        shard_path = storjnode.storage.store.find(store_config, id)
        print("shard located at %s" % shard_path)
    """
    assert(storjnode.storage.shard.valid_id(shard_id))
    store_config = setup(store_config=store_config)  # setup if needed
    for store_path, attributes in store_config.items():
        candidate = _get_shard_path(store_path, shard_id,
                                    attributes["use_folder_tree"])
        if os.path.isfile(candidate):
            return candidate
    return None
# def import_file(store_config, source_path,
# max_shard_size=DEFAULT_SHARD_SIZE):
# """Import a file into the store.
#
# Args:
# source_path: The path of the file to be imported.
# max_shard_size: The maximum shard size.
#
# Returns: A list of shard ids with the fist entry being the root shard.
# All required shards to reconstruct a file can be obtained
# from the root shard.
# """
# store_config = setup(store_config=store_config) # setup if needed
# # FIXME add encryption
# # TODO implement
#
#
# def export_file(store_config, root_shard_id, dest_path):
# assert(storjnode.storage.shard.valid_id(root_shard_id))
# store_config = setup(store_config=store_config) # setup if needed
# # FIXME add encryption
# # TODO implement
|
|
#!/usr/bin/env python
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import errno
import json
import os
import re
import shutil
import subprocess
import sys
from contextlib import contextmanager
import click
import yaml
from setuptools import find_packages
def fail(message, *args, **kwargs):
    """Print a red, bold error to stderr and exit with status 1.

    An optional ``verbose_msg`` keyword argument is echoed verbatim after
    the formatted error message.
    """
    formatted = 'Error: ' + message.format(*args)
    click.echo(click.style(formatted, fg='red', bold=True), err=True)
    if 'verbose_msg' in kwargs:
        click.echo(kwargs['verbose_msg'], err=True)
    sys.exit(1)
def warn(message, *args):
    """Print a yellow, bold warning message to stderr."""
    text = message.format(*args)
    click.echo(click.style(text, fg='yellow', bold=True), err=True)
def info(message, *args):
    """Print a green, bold informational message to stderr."""
    text = message.format(*args)
    click.echo(click.style(text, fg='green', bold=True), err=True)
def step(message, *args):
    """Print a white, bold progress/step message to stderr."""
    text = message.format(*args)
    click.echo(click.style(text, fg='white', bold=True), err=True)
def _get_webpack_build_config(url_root='/'):
    """Build the webpack build-config dict for core Indico assets.

    Reads the core theme definitions from ``themes.yaml`` and returns the
    path/URL layout that ``webpack.config.js`` consumes (dumped as JSON by
    the build commands).

    :param url_root: URL root from which the assets are served.
    """
    with open('indico/modules/events/themes.yaml') as f:
        themes = yaml.safe_load(f.read())
    root_path = os.path.abspath('indico')
    return {
        'build': {
            'baseURLPath': url_root,
            'clientPath': os.path.join(root_path, 'web', 'client'),
            'rootPath': root_path,
            'urlMapPath': os.path.normpath(os.path.join(root_path, '..', 'url_map.json')),
            'staticPath': os.path.join(root_path, 'web', 'static'),
            'staticURL': url_root.rstrip('/') + '/',
            'distPath': os.path.join(root_path, 'web', 'static', 'dist'),
            'distURL': os.path.join(url_root, 'dist/')
        },
        # only themes that actually declare a stylesheet are included.
        # `.items()` instead of the Python-2-only `.viewitems()` — identical
        # iteration semantics, portable to Python 3.
        'themes': {key: {'stylesheet': theme['stylesheet'], 'print_stylesheet': theme.get('print_stylesheet')}
                   for key, theme in themes['definitions'].items()
                   if set(theme) & {'stylesheet', 'print_stylesheet'}}
    }
def _get_plugin_bundle_config(plugin_dir):
try:
with open(os.path.join(plugin_dir, 'webpack-bundles.json')) as f:
return json.load(f)
except IOError as e:
if e.errno == errno.ENOENT:
return {}
raise
def _get_plugin_build_deps(plugin_dir):
try:
with open(os.path.join(plugin_dir, 'required-build-plugins.json')) as f:
return json.load(f)
except IOError as e:
if e.errno == errno.ENOENT:
return []
raise
def _parse_plugin_theme_yaml(plugin_yaml):
    """Parse a plugin's theme YAML merged on top of the core theme YAML.

    This is very similar to what ThemeSettingsProxy does: core top-level
    keys are renamed with a ``__core_`` prefix before merging so they can
    be filtered back out, leaving only the plugin's own definitions.

    :param plugin_yaml: the plugin's theme YAML document as a string
    :return: dict mapping theme name to its (print) stylesheet entries
    """
    with open('indico/modules/events/themes.yaml') as f:
        core_data = f.read()
    core_data = re.sub(r'^(\S+:)$', r'__core_\1', core_data, flags=re.MULTILINE)
    # `.items()` instead of the Python-2-only `.viewitems()` — identical
    # iteration semantics, portable to Python 3.
    settings = {k: v
                for k, v in yaml.safe_load(core_data + '\n' + plugin_yaml).items()
                if not k.startswith('__core_')}
    return {name: {'stylesheet': theme['stylesheet'], 'print_stylesheet': theme.get('print_stylesheet')}
            for name, theme in settings.get('definitions', {}).items()
            if set(theme) & {'stylesheet', 'print_stylesheet'}}
def _get_plugin_themes(plugin_dir):
    """Return the theme definitions a plugin declares ({} when none)."""
    bundle_config = _get_plugin_bundle_config(plugin_dir)
    try:
        theme_file = bundle_config['indicoTheme']
    except KeyError:
        # plugin has no `indicoTheme` entry, i.e. no custom themes
        return {}
    theme_path = os.path.join(plugin_dir, theme_file)
    with open(theme_path) as fp:
        return _parse_plugin_theme_yaml(fp.read())
def _get_plugin_webpack_build_config(plugin_dir, url_root='/'):
    """Build the webpack build-config dict for a plugin's assets.

    Mirrors `_get_webpack_build_config` but points paths/URLs at the
    plugin's own package, and embeds the core build config under `indico`
    so the plugin build can reference the core layout.
    """
    core_config = _get_webpack_build_config(url_root)
    # a plugin distribution is expected to contain exactly one top-level package
    packages = [x for x in find_packages(plugin_dir) if '.' not in x]
    assert len(packages) == 1
    plugin_root_path = os.path.join(plugin_dir, packages[0])
    plugin_name = packages[0].replace('indico_', '')  # XXX: find a better solution for this
    return {
        'isPlugin': True,
        'plugin': plugin_name,
        'indico': {
            'build': core_config['build']
        },
        'build': {
            'indicoSourcePath': os.path.abspath('.'),
            'clientPath': os.path.join(plugin_root_path, 'client'),
            'rootPath': plugin_root_path,
            'urlMapPath': os.path.join(plugin_dir, 'url_map.json'),
            'staticPath': os.path.join(plugin_root_path, 'static'),
            'staticURL': os.path.join(url_root, 'static', 'plugins', plugin_name) + '/',
            'distPath': os.path.join(plugin_root_path, 'static', 'dist'),
            'distURL': os.path.join(url_root, 'static', 'plugins', plugin_name, 'dist/')
        },
        'themes': _get_plugin_themes(plugin_dir),
    }
def _get_webpack_args(dev, watch):
args = ['--mode', 'development' if dev else 'production']
if watch:
args.append('--watch')
return args
@click.group()
def cli():
    # All build commands assume the CWD is the repository root, which is
    # two levels above this script's directory.  (No docstring on purpose:
    # click would show it as the group's help text.)
    repo_root = os.path.join(os.path.dirname(__file__), '..', '..')
    os.chdir(repo_root)
def _common_build_options(allow_watch=True):
    """Decorator factory adding the build options shared by all commands."""
    def decorator(fn):
        # options are applied in the same order as before so the generated
        # --help output stays unchanged
        options = [
            click.option('--dev', is_flag=True, default=False, help="Build in dev mode"),
            click.option('--clean/--no-clean', default=None,
                         help="Delete everything in dist. This is disabled by default for `--dev` builds."),
            click.option('--url-root', default='/', metavar='PATH',
                         help='URL root from which the assets are loaded. '
                              'Defaults to / and should usually not be changed'),
        ]
        if allow_watch:
            options.append(click.option('--watch', is_flag=True, default=False,
                                        help="Run the watcher to rebuild on changes"))
        for option in options:
            fn = option(fn)
        return fn
    return decorator
def _clean(webpack_build_config, plugin_dir=None):
    """Delete the build's dist directory (if any), warning about it."""
    dist_path = webpack_build_config['build']['distPath']
    if not os.path.exists(dist_path):
        return
    display_path = os.path.relpath(dist_path, plugin_dir or os.curdir)
    warn('deleting ' + display_path)
    shutil.rmtree(dist_path)
@cli.command('indico', short_help='Builds assets of Indico.')
@_common_build_options()
def build_indico(dev, clean, watch, url_root):
    """Run webpack to build assets"""
    # --clean defaults to enabled for production builds, disabled for --dev
    clean = clean or (clean is None and not dev)
    webpack_build_config_file = 'webpack-build-config.json'
    webpack_build_config = _get_webpack_build_config(url_root)
    # dump the build config as JSON for webpack to consume
    with open(webpack_build_config_file, 'w') as f:
        json.dump(webpack_build_config, f, indent=2, sort_keys=True)
    if clean:
        _clean(webpack_build_config)
    # force regenerating the URL map for clean or production builds
    force_url_map = ['--force'] if clean or not dev else []
    url_map_path = webpack_build_config['build']['urlMapPath']
    subprocess.check_call(['python', 'bin/maintenance/dump_url_map.py', '--output', url_map_path] + force_url_map)
    args = _get_webpack_args(dev, watch)
    try:
        subprocess.check_call(['npx', 'webpack'] + args)
    except subprocess.CalledProcessError:
        fail('running webpack failed')
    finally:
        # the dumped build config is only removed for non-dev builds
        if not dev:
            os.unlink(webpack_build_config_file)
def _validate_plugin_dir(ctx, param, value):
    """Click callback ensuring ``value`` looks like a buildable plugin dir."""
    def _has(name):
        return os.path.exists(os.path.join(value, name))
    if not _has('setup.py'):
        raise click.BadParameter('no setup.py found in {}'.format(value))
    if not _has('webpack.config.js') and not _has('webpack-bundles.json'):
        raise click.BadParameter('no webpack.config.js or webpack-bundles.json found in {}'.format(value))
    return value
def _is_plugin_dir(path):
    """Return whether ``path`` passes the plugin directory validation."""
    try:
        _validate_plugin_dir(None, None, path)
    except click.BadParameter:
        return False
    return True
@contextmanager
def _chdir(path):
cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(cwd)
@cli.command('plugin', short_help='Builds assets of a plugin.')
@click.argument('plugin_dir', type=click.Path(exists=True, file_okay=False, resolve_path=True),
                callback=_validate_plugin_dir)
@_common_build_options()
def build_plugin(plugin_dir, dev, clean, watch, url_root):
    """Run webpack to build plugin assets"""
    # --clean defaults to enabled for production builds, disabled for --dev
    clean = clean or (clean is None and not dev)
    webpack_build_config_file = os.path.join(plugin_dir, 'webpack-build-config.json')
    webpack_build_config = _get_plugin_webpack_build_config(plugin_dir, url_root)
    # dump the build config as JSON for webpack to consume
    with open(webpack_build_config_file, 'w') as f:
        json.dump(webpack_build_config, f, indent=2, sort_keys=True)
    if clean:
        _clean(webpack_build_config, plugin_dir)
    # force regenerating the URL map for clean or production builds; include
    # this plugin and any plugins it requires at build time
    force_url_map = ['--force'] if clean or not dev else []
    url_map_path = webpack_build_config['build']['urlMapPath']
    dump_plugin_args = ['--plugin', webpack_build_config['plugin']]
    for name in _get_plugin_build_deps(plugin_dir):
        dump_plugin_args += ['--plugin', name]
    subprocess.check_call(['python', 'bin/maintenance/dump_url_map.py',
                           '--output', url_map_path] + dump_plugin_args + force_url_map)
    # fall back to the generic plugin webpack config if the plugin ships none
    webpack_config_file = os.path.join(plugin_dir, 'webpack.config.js')
    if not os.path.exists(webpack_config_file):
        webpack_config_file = 'plugin.webpack.config.js'
    # install the plugin's own npm dependencies (run inside the plugin dir)
    if os.path.exists(os.path.join(plugin_dir, 'package.json')):
        with _chdir(plugin_dir):
            try:
                subprocess.check_call(['npm', 'install', '--quiet'])
            except subprocess.CalledProcessError:
                fail('running npm failed')
    args = _get_webpack_args(dev, watch)
    args += ['--config', webpack_config_file]
    # let the plugin build resolve modules from indico's node_modules
    os.environ['NODE_PATH'] = os.path.abspath('node_modules')
    os.environ['INDICO_PLUGIN_ROOT'] = plugin_dir
    try:
        subprocess.check_call(['npx', 'webpack'] + args)
    except subprocess.CalledProcessError:
        fail('running webpack failed')
    finally:
        # the dumped build config is only removed for non-dev builds
        if not dev:
            os.unlink(webpack_build_config_file)
@cli.command('all-plugins', short_help='Builds assets of all plugins in a directory.')
@click.argument('plugins_dir', type=click.Path(exists=True, file_okay=False, resolve_path=True))
@_common_build_options(allow_watch=False)
@click.pass_context
def build_all_plugins(ctx, plugins_dir, dev, clean, url_root):
    """Run webpack to build plugin assets"""
    # build every subdirectory that looks like a plugin, in sorted order
    names = sorted(name for name in os.listdir(plugins_dir)
                   if _is_plugin_dir(os.path.join(plugins_dir, name)))
    for name in names:
        step('plugin: {}', name)
        ctx.invoke(build_plugin, plugin_dir=os.path.join(plugins_dir, name),
                   dev=dev, clean=clean, watch=False, url_root=url_root)
# Entry point when executed directly (the module is also importable).
if __name__ == '__main__':
    cli()
|
|
from __future__ import absolute_import, print_function, division
import sys
import pytest
pytestmark = pytest.mark.skipif(sys.version_info[0] == 3,
reason="PyHive doesn't work with Python 3.x")
pyspark = pytest.importorskip('pyspark')
py4j = pytest.importorskip('py4j')
sa = pytest.importorskip('sqlalchemy')
import os
import itertools
import shutil
from py4j.protocol import Py4JJavaError
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from blaze import compute, symbol, into, by, sin, exp, cos, tan, join
try:
from pyspark.sql import DataFrame as SparkDataFrame
except ImportError:
from pyspark.sql import SchemaRDD as SparkDataFrame
from pyspark import HiveContext, SQLContext
from pyspark.sql import Row, SchemaRDD
from odo import odo, discover
from odo.utils import tmpfile
# Small fixed dataset used by most tests (name, amount, id).
data = [['Alice', 100.0, 1],
        ['Bob', 200.0, 2],
        ['Alice', 50.0, 3]]
# Build a timestamped dataset: for each pandas date offset frequency,
# generate len(data) rows with random names/amounts/ids and a date range.
# Seeded so the generated frame is reproducible across runs.
date_data = []
np.random.seed(0)
for attr in ('YearBegin', 'MonthBegin', 'Day', 'Hour', 'Minute', 'Second'):
    rng = pd.date_range(start='now', periods=len(data),
                        freq=getattr(pd.datetools, attr)()).values
    date_data += list(zip(np.random.choice(['Alice', 'Bob', 'Joe', 'Lester'],
                                           size=len(data)),
                          np.random.rand(len(data)) * 100,
                          np.random.randint(100, size=3),
                          rng))
# Lookup table joined against `data` in the join tests.
cities_data = [['Alice', 'NYC'],
               ['Bob', 'Boston']]
# Pandas mirrors of the Spark tables; tests compare Spark results to these.
df = pd.DataFrame(data, columns=['name', 'amount', 'id'])
date_df = pd.DataFrame(date_data, columns=['name', 'amount', 'id', 'ds'])
cities_df = pd.DataFrame(cities_data, columns=['name', 'city'])
# sc is from conftest.py
@pytest.yield_fixture(scope='module')
def sql(sc):
    """Yield a Spark SQL context (Hive-backed on pyspark >= 1.3).

    On teardown, remove the on-disk artifacts Hive leaves behind
    (the metastore directory and the Derby log).
    """
    try:
        if hasattr(pyspark.sql, 'types'):  # pyspark >= 1.3
            yield HiveContext(sc)
        else:
            yield SQLContext(sc)
    finally:
        dbpath = 'metastore_db'
        logpath = 'derby.log'
        if os.path.exists(dbpath):
            assert os.path.isdir(dbpath)
            shutil.rmtree(dbpath)
        if os.path.exists(logpath):
            assert os.path.isfile(logpath)
            os.remove(logpath)
@pytest.yield_fixture(scope='module')
def people(sc):
    """Yield an RDD of Row(name, amount, id) built from `df` via a temp CSV."""
    with tmpfile('.txt') as fn:
        df.to_csv(fn, header=False, index=False)
        raw = sc.textFile(fn)
        parts = raw.map(lambda line: line.split(','))
        yield parts.map(lambda person: Row(name=person[0],
                                           amount=float(person[1]),
                                           id=int(person[2])))
@pytest.yield_fixture(scope='module')
def cities(sc):
    """Yield an RDD of Row(name, city) built from `cities_df` via a temp CSV."""
    with tmpfile('.txt') as fn:
        cities_df.to_csv(fn, header=False, index=False)
        raw = sc.textFile(fn)
        parts = raw.map(lambda line: line.split(','))
        yield parts.map(lambda person: Row(name=person[0], city=person[1]))
@pytest.yield_fixture(scope='module')
def date_people(sc):
    """Yield an RDD of Row(name, amount, id, ds) built from `date_df`."""
    with tmpfile('.txt') as fn:
        date_df.to_csv(fn, header=False, index=False)
        raw = sc.textFile(fn)
        parts = raw.map(lambda line: line.split(','))
        yield parts.map(lambda person: Row(name=person[0],
                                           amount=float(person[1]),
                                           id=int(person[2]),
                                           ds=pd.Timestamp(person[3]).to_pydatetime()))
@pytest.fixture(scope='module')
def ctx(sql, people, cities, date_people):
    """Register the three RDDs as tables 't', 's' and 'dates' on `sql`.

    Uses the DataFrame API when available and falls back to the legacy
    `inferSchema`/`registerTempTable` API on older pyspark (AttributeError).
    """
    try:
        sql.registerDataFrameAsTable(sql.createDataFrame(people), 't')
        sql.cacheTable('t')
        sql.registerDataFrameAsTable(sql.createDataFrame(cities), 's')
        sql.cacheTable('s')
        sql.registerDataFrameAsTable(sql.createDataFrame(date_people), 'dates')
        sql.cacheTable('dates')
    except AttributeError:
        sql.inferSchema(people).registerTempTable('t')
        sql.inferSchema(cities).registerTempTable('s')
        sql.inferSchema(date_people).registerTempTable('dates')
    return sql
@pytest.fixture(scope='module')
def db(ctx):
    """Blaze symbol describing the datashape discovered from the SQL context."""
    return symbol('db', discover(ctx))
def test_projection(db, ctx):
    """Column projection on Spark matches the pandas-backed computation."""
    expr = db.t[['id', 'name']]
    spark_result = compute(expr, ctx)
    pandas_result = compute(expr, {db: {'t': df}})
    assert into(set, spark_result) == into(set, pandas_result)
def test_symbol_compute(db, ctx):
    """Computing a bare table symbol yields a Spark dataframe/SchemaRDD."""
    result = compute(db.t, ctx)
    assert isinstance(result, (SparkDataFrame, SchemaRDD))
def test_field_access(db, ctx):
    """Each individual column computed on Spark matches the pandas result."""
    for field in db.t.fields:
        expr = getattr(db.t, field)
        result = into(pd.Series, compute(expr, ctx))
        expected = compute(expr, {db: {'t': df}})
        assert result.name == expected.name
        np.testing.assert_array_equal(result.values,
                                      expected.values)
def test_head(db, ctx):
    """head() on a projection matches the pandas-backed computation."""
    expr = db.t[['name', 'amount']].head(2)
    spark_result = compute(expr, ctx)
    pandas_result = compute(expr, {db: {'t': df}})
    assert into(list, spark_result) == into(list, pandas_result)
def test_literals(db, ctx):
    """Filtering against a literal matches pandas (rows compared as sets)."""
    expr = db.t[db.t.amount >= 100]
    spark_rows = into(list, compute(expr, ctx))
    pandas_rows = into(list, compute(expr, {db: {'t': df}}))
    assert [set(row) for row in spark_rows] == [set(row) for row in pandas_rows]
def test_by_summary(db, ctx):
    """Grouped min/max summary matches the pandas-backed computation."""
    table = db.t
    expr = by(table.name, mymin=table.amount.min(), mymax=table.amount.max())
    spark_result = compute(expr, ctx)
    pandas_result = compute(expr, {db: {'t': df}})
    assert into(set, spark_result) == into(set, pandas_result)
def test_join(db, ctx):
    """A natural join on Spark matches pandas and preserves the datashape."""
    expr = join(db.t, db.s)
    result = compute(expr, ctx)
    expected = compute(expr, {db: {'t': df, 's': cities_df}})
    assert isinstance(result, (SparkDataFrame, SchemaRDD))
    assert into(set, result) == into(set, expected)
    assert discover(result) == expr.dshape
def test_join_diff_contexts(db, ctx, cities):
    """Joining a registered table with data loaded into the context via odo."""
    expr = join(db.t, db.s, 'name')
    people = ctx.table('t')
    # load the cities RDD into the context with the schema of table 's'
    cities = into(ctx, cities, dshape=discover(ctx.table('s')))
    scope = {db: {'t': people, 's': cities}}
    result = compute(expr, scope)
    expected = compute(expr, {db: {'t': df, 's': cities_df}})
    assert (set(map(frozenset, odo(result, set))) ==
            set(map(frozenset, odo(expected, set))))
def test_field_distinct(ctx, db):
    """distinct() on a column matches the pandas-backed computation."""
    expr = db.t.name.distinct()
    spark_result = compute(expr, ctx)
    pandas_result = compute(expr, {db: {'t': df}})
    assert into(set, spark_result, dshape=expr.dshape) == into(set, pandas_result)
def test_boolean(ctx, db):
    """A boolean column expression matches the pandas-backed computation."""
    expr = db.t.amount > 50
    spark_result = compute(expr, ctx)
    pandas_result = compute(expr, {db: {'t': df}})
    assert into(set, spark_result, dshape=expr.dshape) == into(set, pandas_result)
def test_selection(ctx, db):
    """Row selection matches pandas (rows compared as sets)."""
    expr = db.t[db.t.amount > 50]
    spark_rows = into(list, compute(expr, ctx))
    pandas_rows = into(list, compute(expr, {db: {'t': df}}))
    assert [set(row) for row in spark_rows] == [set(row) for row in pandas_rows]
def test_selection_field(ctx, db):
    """Selecting a column of filtered rows matches the pandas computation."""
    expr = db.t[db.t.amount > 50].name
    spark_result = compute(expr, ctx)
    pandas_result = compute(expr, {db: {'t': df}})
    assert into(set, spark_result, dshape=expr.dshape) == into(set, pandas_result)
@pytest.mark.parametrize(['field', 'reduction'],
                         itertools.product(['id', 'amount'], ['sum', 'max',
                                                              'min', 'mean',
                                                              'count',
                                                              'nunique']))
def test_reductions(ctx, db, field, reduction):
    """Scalar reductions on Spark match pandas (result is a 1x1 table)."""
    expr = getattr(db.t[field], reduction)()
    result = compute(expr, ctx)
    expected = compute(expr, {db: {'t': df}})
    assert into(list, result)[0][0] == expected
def test_column_arithmetic(ctx, db):
    """Scalar arithmetic on a column matches the pandas computation."""
    expr = db.t.amount + 1
    spark_result = compute(expr, ctx)
    pandas_result = compute(expr, {db: {'t': df}})
    assert into(set, spark_result, dshape=expr.dshape) == into(set, pandas_result)
# pyspark doesn't use __version__ so we use this kludge
# should submit a bug report upstream to get __version__
def fail_on_spark_one_two(x):
    """Mark `x` as xfail on pyspark < 1.3 (math needs HiveContext there)."""
    if not hasattr(pyspark.sql, 'types'):
        return pytest.mark.xfail(x, raises=py4j.protocol.Py4JJavaError,
                                 reason=('math functions only supported in '
                                         'HiveContext'))
    return x
@pytest.mark.parametrize('func', list(map(fail_on_spark_one_two,
                                          [sin, cos, tan, exp])))
def test_math(ctx, db, func):
    """Math ufuncs on Spark match pandas up to float tolerance and order."""
    expr = func(db.t.amount)
    result = compute(expr, ctx)
    expected = compute(expr, {db: {'t': df}})
    np.testing.assert_allclose(np.sort(odo(result, np.ndarray,
                                           dshape=expr.dshape)),
                               np.sort(odo(expected, np.ndarray)))
@pytest.mark.parametrize(['field', 'ascending'],
                         itertools.product(['name', 'id', ['name', 'amount']],
                                           [True, False]))
def test_sort(ctx, db, field, ascending):
    """Sorting by single or multiple keys matches the pandas computation."""
    expr = db.t.sort(field, ascending=ascending)
    result = compute(expr, ctx)
    expected = compute(expr, {db: {'t': df}})
    assert list(map(set, into(list, result))) == list(map(set, into(list,
                                                                    expected)))
@pytest.mark.xfail
def test_map(ctx, db):
    """Element-wise map over a column (currently expected to fail: xfail)."""
    expr = db.t.id.map(lambda x: x + 1, 'int')
    spark_result = compute(expr, ctx)
    pandas_result = compute(expr, {db: {'t': df}})
    assert into(set, spark_result, dshape=expr.dshape) == into(set, pandas_result)
@pytest.mark.parametrize(['grouper', 'reducer', 'reduction'],
                         itertools.chain(itertools.product(['name', 'id',
                                                            ['id', 'amount']],
                                                           ['id', 'amount'],
                                                           ['sum', 'count',
                                                            'max', 'min',
                                                            'mean',
                                                            'nunique']),
                                         [('name', 'name', 'count'),
                                          ('name', 'name', 'nunique')]))
def test_by(ctx, db, grouper, reducer, reduction):
    """Group-by with a reduction matches pandas (rows compared unordered)."""
    t = db.t
    expr = by(t[grouper], total=getattr(t[reducer], reduction)())
    result = compute(expr, ctx)
    expected = compute(expr, {db: {'t': df}})
    assert (set(map(frozenset, into(list, result))) ==
            set(map(frozenset, into(list, expected))))
@pytest.mark.parametrize(['reducer', 'reduction'],
                         itertools.product(['id', 'name'],
                                           ['count', 'nunique']))
def test_multikey_by(ctx, db, reducer, reduction):
    """Group-by on multiple key columns matches the pandas computation."""
    t = db.t
    expr = by(t[['id', 'amount']], total=getattr(getattr(t, reducer),
                                                 reduction)())
    result = compute(expr, ctx)
    expected = compute(expr, {db: {'t': df}})
    assert (set(map(frozenset, into(list, result))) ==
            set(map(frozenset, into(list, expected))))
def test_grouper_with_arith(ctx, db):
    """Group-by whose reduction involves column arithmetic matches pandas."""
    expr = by(db.t[['id', 'amount']], total=(db.t.amount + 1).sum())
    spark_rows = into(list, compute(expr, ctx))
    pandas_rows = into(list, compute(expr, {db: {'t': df}}))
    assert [set(row) for row in spark_rows] == [set(row) for row in pandas_rows]
def test_by_non_native_ops(ctx, db):
    """Group-by with a reduction Spark has no native op for (nunique)."""
    expr = by(db.t.id, total=db.t.id.nunique())
    spark_rows = into(list, compute(expr, ctx))
    pandas_rows = into(list, compute(expr, {db: {'t': df}}))
    assert [set(row) for row in spark_rows] == [set(row) for row in pandas_rows]
@pytest.mark.xfail(not hasattr(pyspark.sql, 'types'),
                   reason=('length string function not available without '
                           'HiveContext'),
                   raises=py4j.protocol.Py4JJavaError)
def test_strlen(ctx, db):
    """String length on a column matches pandas and keeps the column name."""
    expr = db.t.name.strlen()
    result = odo(compute(expr, ctx), pd.Series)
    expected = compute(expr, {db: {'t': df}})
    assert result.name == 'name'
    assert expected.name == 'name'
    assert odo(result, set) == odo(expected, set)
# Parameter list for test_by_with_date: each date attribute wrapped in an
# old-style pytest param-mark (`pytest.mark.xfail(<condition>, <param>, ...)`
# applies the mark to the param value; legacy pytest API — TODO migrate to
# pytest.param(..., marks=...) when upgrading pytest).
date_attrs = [pytest.mark.xfail(not hasattr(pyspark.sql, 'types'),
                                attr,
                                raises=(Py4JJavaError, AssertionError),
                                reason=('date attribute %r not supported '
                                        'without hive') % attr)
              for attr in ['year', 'month', 'day', 'hour', 'minute', 'second']]
# sub-second attributes are expected to fail even with Hive
date_attrs += [pytest.mark.xfail(attr,
                                 raises=Py4JJavaError,
                                 reason=('Hive does not support date '
                                         'attribute %r') % attr)
               for attr in ['millisecond', 'microsecond']]
@pytest.mark.parametrize('attr', date_attrs)
def test_by_with_date(ctx, db, attr):
    """Group-by on a datetime attribute matches pandas (dtype-insensitive)."""
    # TODO: investigate CSV writing precision between pandas 0.16.0 and 0.16.1
    # TODO: see if we can use odo to convert the dshape of an existing
    #       DataFrame
    expr = by(getattr(db.dates.ds, attr),
              mean=db.dates.amount.mean())
    result = odo(compute(expr, ctx), pd.DataFrame).sort('mean').reset_index(drop=True)
    expected = compute(expr, {db: {'dates': date_df}}).sort('mean').reset_index(drop=True)
    tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize('keys', [[1], [1, 2]])
def test_isin(ctx, db, keys):
    """isin() filtering matches pandas for one- and two-element key lists."""
    expr = db.t[db.t.id.isin(keys)]
    spark_result = odo(compute(expr, ctx), set)
    pandas_result = odo(compute(expr, {db: {'t': df}}), set)
    assert (set(map(frozenset, odo(spark_result, list))) ==
            set(map(frozenset, odo(pandas_result, list))))
def test_nunique_spark_dataframe(ctx, db):
    """Table-level nunique() agrees with Spark's own distinct().count()."""
    blaze_count = odo(compute(db.t.nunique(), ctx), int)
    spark_count = ctx.table('t').distinct().count()
    assert blaze_count == spark_count
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from testtools import matchers
from keystone.common import driver_hints
from keystone.common import provider_api
import keystone.conf
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
from keystone.tests.unit import filtering
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
class IdentityTests(object):
def _get_domain_fixture(self):
domain = unit.new_domain_ref()
PROVIDERS.resource_api.create_domain(domain['id'], domain)
return domain
def _set_domain_scope(self, domain_id):
# We only provide a domain scope if we have multiple drivers
if CONF.identity.domain_specific_drivers_enabled:
return domain_id
def test_authenticate_bad_user(self):
with self.make_request():
self.assertRaises(AssertionError,
PROVIDERS.identity_api.authenticate,
user_id=uuid.uuid4().hex,
password=self.user_foo['password'])
def test_authenticate_bad_password(self):
with self.make_request():
self.assertRaises(AssertionError,
PROVIDERS.identity_api.authenticate,
user_id=self.user_foo['id'],
password=uuid.uuid4().hex)
    def test_authenticate(self):
        """A valid user/password pair returns the sanitized user ref."""
        with self.make_request():
            user_ref = PROVIDERS.identity_api.authenticate(
                user_id=self.user_sna['id'],
                password=self.user_sna['password'])
        # NOTE(termie): the password field is left in user_sna to make
        #               it easier to authenticate in tests, but should
        #               not be returned by the api
        self.user_sna.pop('password')
        self.user_sna['enabled'] = True
        self.assertUserDictEqual(self.user_sna, user_ref)
    def test_authenticate_and_get_roles_no_metadata(self):
        """Authentication works and role grants are visible for a new user."""
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        # Remove user id. It is ignored by create_user() and will break the
        # subset test below.
        del user['id']
        new_user = PROVIDERS.identity_api.create_user(user)
        role_member = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role_member['id'], role_member)
        PROVIDERS.assignment_api.add_role_to_user_and_project(
            new_user['id'], self.project_baz['id'], role_member['id']
        )
        with self.make_request():
            user_ref = PROVIDERS.identity_api.authenticate(
                user_id=new_user['id'],
                password=user['password'])
        self.assertNotIn('password', user_ref)
        # NOTE(termie): the password field is left in user_sna to make
        #               it easier to authenticate in tests, but should
        #               not be returned by the api
        user.pop('password')
        self.assertLessEqual(user.items(), user_ref.items())
        role_list = PROVIDERS.assignment_api.get_roles_for_user_and_project(
            new_user['id'], self.project_baz['id'])
        self.assertEqual(1, len(role_list))
        self.assertIn(role_member['id'], role_list)
    def test_authenticate_if_no_password_set(self):
        """Authenticating raises when no password matches the given user id."""
        # NOTE(review): `id_` is a random hex value that is never linked to the
        # created user (create_user's returned ref is discarded), so this
        # effectively authenticates a nonexistent user rather than a user
        # without a password — confirm whether that is the intended coverage.
        id_ = uuid.uuid4().hex
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        PROVIDERS.identity_api.create_user(user)
        with self.make_request():
            self.assertRaises(AssertionError,
                              PROVIDERS.identity_api.authenticate,
                              user_id=id_,
                              password='password')
def test_create_unicode_user_name(self):
unicode_name = u'name \u540d\u5b57'
user = unit.new_user_ref(name=unicode_name,
domain_id=CONF.identity.default_domain_id)
ref = PROVIDERS.identity_api.create_user(user)
self.assertEqual(unicode_name, ref['name'])
    def test_get_user(self):
        """get_user returns the stored user without the password field."""
        user_ref = PROVIDERS.identity_api.get_user(self.user_foo['id'])
        # NOTE(termie): the password field is left in user_foo to make
        #               it easier to authenticate in tests, but should
        #               not be returned by the api
        self.user_foo.pop('password')
        # NOTE(edmondsw): check that options is set, even if it's just an
        # empty dict, because otherwise auth will blow up for whatever
        # case misses this.
        self.assertIn('options', user_ref)
        self.assertDictEqual(self.user_foo, user_ref)
def test_get_user_returns_required_attributes(self):
user_ref = PROVIDERS.identity_api.get_user(self.user_foo['id'])
self.assertIn('id', user_ref)
self.assertIn('name', user_ref)
self.assertIn('enabled', user_ref)
self.assertIn('password_expires_at', user_ref)
    @unit.skip_if_cache_disabled('identity')
    def test_cache_layer_get_user(self):
        """get_user caching: stale reads after backdoor delete, invalidation,
        and cache refresh after an update via the API."""
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        PROVIDERS.identity_api.create_user(user)
        ref = PROVIDERS.identity_api.get_user_by_name(
            user['name'], user['domain_id']
        )
        # cache the result.
        PROVIDERS.identity_api.get_user(ref['id'])
        # delete bypassing identity api
        domain_id, driver, entity_id = (
            PROVIDERS.identity_api._get_domain_driver_and_entity_id(ref['id']))
        driver.delete_user(entity_id)
        # cached ref is still served despite the backdoor delete
        self.assertDictEqual(ref, PROVIDERS.identity_api.get_user(ref['id']))
        PROVIDERS.identity_api.get_user.invalidate(
            PROVIDERS.identity_api, ref['id']
        )
        # after invalidation the deletion becomes visible
        self.assertRaises(exception.UserNotFound,
                          PROVIDERS.identity_api.get_user, ref['id'])
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user = PROVIDERS.identity_api.create_user(user)
        ref = PROVIDERS.identity_api.get_user_by_name(
            user['name'], user['domain_id']
        )
        user['description'] = uuid.uuid4().hex
        # cache the result.
        PROVIDERS.identity_api.get_user(ref['id'])
        # update using identity api and get back updated user.
        user_updated = PROVIDERS.identity_api.update_user(ref['id'], user)
        self.assertLessEqual(
            PROVIDERS.identity_api.get_user(ref['id']).items(),
            user_updated.items()
        )
        self.assertLessEqual(
            PROVIDERS.identity_api.get_user_by_name(
                ref['name'], ref['domain_id']).items(),
            user_updated.items()
        )
def test_get_user_returns_not_found(self):
self.assertRaises(exception.UserNotFound,
PROVIDERS.identity_api.get_user,
uuid.uuid4().hex)
    def test_get_user_by_name(self):
        """get_user_by_name returns the stored user without the password."""
        user_ref = PROVIDERS.identity_api.get_user_by_name(
            self.user_foo['name'], CONF.identity.default_domain_id)
        # NOTE(termie): the password field is left in user_foo to make
        #               it easier to authenticate in tests, but should
        #               not be returned by the api
        self.user_foo.pop('password')
        self.assertDictEqual(self.user_foo, user_ref)
    @unit.skip_if_cache_disabled('identity')
    def test_cache_layer_get_user_by_name(self):
        """get_user_by_name caching: stale reads after a backdoor delete,
        invalidation, and cache refresh after an update via the API."""
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        PROVIDERS.identity_api.create_user(user)
        ref = PROVIDERS.identity_api.get_user_by_name(
            user['name'], user['domain_id']
        )
        # delete bypassing the identity api.
        domain_id, driver, entity_id = (
            PROVIDERS.identity_api._get_domain_driver_and_entity_id(ref['id']))
        driver.delete_user(entity_id)
        # cached ref is still served despite the backdoor delete
        self.assertDictEqual(ref, PROVIDERS.identity_api.get_user_by_name(
            user['name'], CONF.identity.default_domain_id))
        PROVIDERS.identity_api.get_user_by_name.invalidate(
            PROVIDERS.identity_api,
            user['name'],
            CONF.identity.default_domain_id
        )
        # after invalidation the deletion becomes visible
        self.assertRaises(exception.UserNotFound,
                          PROVIDERS.identity_api.get_user_by_name,
                          user['name'], CONF.identity.default_domain_id)
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user = PROVIDERS.identity_api.create_user(user)
        ref = PROVIDERS.identity_api.get_user_by_name(
            user['name'], user['domain_id']
        )
        user['description'] = uuid.uuid4().hex
        # updating through the API keeps both cache entries consistent
        user_updated = PROVIDERS.identity_api.update_user(ref['id'], user)
        self.assertLessEqual(
            PROVIDERS.identity_api.get_user(ref['id']).items(),
            user_updated.items()
        )
        self.assertLessEqual(
            PROVIDERS.identity_api.get_user_by_name(
                ref['name'],
                ref['domain_id']
            ).items(),
            user_updated.items()
        )
def test_get_user_by_name_returns_not_found(self):
self.assertRaises(exception.UserNotFound,
PROVIDERS.identity_api.get_user_by_name,
uuid.uuid4().hex,
CONF.identity.default_domain_id)
def test_create_duplicate_user_name_fails(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user = PROVIDERS.identity_api.create_user(user)
self.assertRaises(exception.Conflict,
PROVIDERS.identity_api.create_user,
user)
    def test_create_duplicate_user_name_in_different_domains(self):
        """The same user name is allowed in two different domains."""
        new_domain = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain)
        user1 = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user2 = unit.new_user_ref(name=user1['name'],
                                  domain_id=new_domain['id'])
        # neither create should raise despite the shared name
        PROVIDERS.identity_api.create_user(user1)
        PROVIDERS.identity_api.create_user(user2)
    def test_move_user_between_domains(self):
        """Changing a user's domain_id via update_user is rejected."""
        domain1 = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain1['id'], domain1)
        domain2 = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain2['id'], domain2)
        user = unit.new_user_ref(domain_id=domain1['id'])
        user = PROVIDERS.identity_api.create_user(user)
        user['domain_id'] = domain2['id']
        self.assertRaises(exception.ValidationError,
                          PROVIDERS.identity_api.update_user, user['id'], user)
    def test_rename_duplicate_user_name_fails(self):
        """Renaming a user to another user's name raises Conflict."""
        user1 = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user2 = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        PROVIDERS.identity_api.create_user(user1)
        user2 = PROVIDERS.identity_api.create_user(user2)
        user2['name'] = user1['name']
        self.assertRaises(exception.Conflict,
                          PROVIDERS.identity_api.update_user,
                          user2['id'],
                          user2)
    def test_update_user_id_fails(self):
        """Attempting to change a user's id via update_user is rejected."""
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user = PROVIDERS.identity_api.create_user(user)
        original_id = user['id']
        user['id'] = 'fake2'
        self.assertRaises(exception.ValidationError,
                          PROVIDERS.identity_api.update_user,
                          original_id,
                          user)
        # The stored user keeps its original id...
        user_ref = PROVIDERS.identity_api.get_user(original_id)
        self.assertEqual(original_id, user_ref['id'])
        # ...and no user exists under the attempted new id.
        self.assertRaises(exception.UserNotFound,
                          PROVIDERS.identity_api.get_user,
                          'fake2')
    def test_delete_user_with_group_project_domain_links(self):
        """Deleting a user removes its group membership links."""
        role1 = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role1['id'], role1)
        domain1 = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain1['id'], domain1)
        project1 = unit.new_project_ref(domain_id=domain1['id'])
        PROVIDERS.resource_api.create_project(project1['id'], project1)
        user1 = unit.new_user_ref(domain_id=domain1['id'])
        user1 = PROVIDERS.identity_api.create_user(user1)
        group1 = unit.new_group_ref(domain_id=domain1['id'])
        group1 = PROVIDERS.identity_api.create_group(group1)
        # Link the user to a project grant, a domain grant and a group.
        PROVIDERS.assignment_api.create_grant(
            user_id=user1['id'], project_id=project1['id'], role_id=role1['id']
        )
        PROVIDERS.assignment_api.create_grant(
            user_id=user1['id'], domain_id=domain1['id'], role_id=role1['id']
        )
        PROVIDERS.identity_api.add_user_to_group(
            user_id=user1['id'], group_id=group1['id']
        )
        # Sanity-check that all three links exist before the delete.
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=user1['id'],
            project_id=project1['id'])
        self.assertEqual(1, len(roles_ref))
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=user1['id'],
            domain_id=domain1['id'])
        self.assertEqual(1, len(roles_ref))
        PROVIDERS.identity_api.check_user_in_group(
            user_id=user1['id'],
            group_id=group1['id'])
        PROVIDERS.identity_api.delete_user(user1['id'])
        # Group membership must be gone once the user is deleted.
        self.assertRaises(exception.NotFound,
                          PROVIDERS.identity_api.check_user_in_group,
                          user1['id'],
                          group1['id'])
    def test_delete_group_with_user_project_domain_links(self):
        """Deleting a group leaves its member users intact."""
        role1 = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role1['id'], role1)
        domain1 = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain1['id'], domain1)
        project1 = unit.new_project_ref(domain_id=domain1['id'])
        PROVIDERS.resource_api.create_project(project1['id'], project1)
        user1 = unit.new_user_ref(domain_id=domain1['id'])
        user1 = PROVIDERS.identity_api.create_user(user1)
        group1 = unit.new_group_ref(domain_id=domain1['id'])
        group1 = PROVIDERS.identity_api.create_group(group1)
        # Link the group to a project grant, a domain grant and a member.
        PROVIDERS.assignment_api.create_grant(
            group_id=group1['id'], project_id=project1['id'],
            role_id=role1['id']
        )
        PROVIDERS.assignment_api.create_grant(
            group_id=group1['id'], domain_id=domain1['id'], role_id=role1['id']
        )
        PROVIDERS.identity_api.add_user_to_group(
            user_id=user1['id'], group_id=group1['id']
        )
        # Sanity-check that the links exist before the delete.
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=group1['id'],
            project_id=project1['id'])
        self.assertEqual(1, len(roles_ref))
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=group1['id'],
            domain_id=domain1['id'])
        self.assertEqual(1, len(roles_ref))
        PROVIDERS.identity_api.check_user_in_group(
            user_id=user1['id'],
            group_id=group1['id'])
        PROVIDERS.identity_api.delete_group(group1['id'])
        # The member user itself must still exist after the group delete.
        PROVIDERS.identity_api.get_user(user1['id'])
def test_update_user_returns_not_found(self):
user_id = uuid.uuid4().hex
self.assertRaises(exception.UserNotFound,
PROVIDERS.identity_api.update_user,
user_id,
{'id': user_id,
'domain_id': CONF.identity.default_domain_id})
def test_delete_user_returns_not_found(self):
self.assertRaises(exception.UserNotFound,
PROVIDERS.identity_api.delete_user,
uuid.uuid4().hex)
def test_create_user_with_long_password(self):
user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id,
password='a' * 2000)
# success create a user with long password
PROVIDERS.identity_api.create_user(user)
    def test_create_user_missed_password(self):
        """Authentication with an empty or None password must fail."""
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user = PROVIDERS.identity_api.create_user(user)
        PROVIDERS.identity_api.get_user(user['id'])
        # Make sure the user is not allowed to login
        # with a password that is empty string or None
        with self.make_request():
            self.assertRaises(AssertionError,
                              PROVIDERS.identity_api.authenticate,
                              user_id=user['id'],
                              password='')
            self.assertRaises(AssertionError,
                              PROVIDERS.identity_api.authenticate,
                              user_id=user['id'],
                              password=None)
    def test_create_user_none_password(self):
        """A user created with password=None cannot authenticate."""
        user = unit.new_user_ref(password=None,
                                 domain_id=CONF.identity.default_domain_id)
        user = PROVIDERS.identity_api.create_user(user)
        PROVIDERS.identity_api.get_user(user['id'])
        # Make sure the user is not allowed to login
        # with a password that is empty string or None
        with self.make_request():
            self.assertRaises(AssertionError,
                              PROVIDERS.identity_api.authenticate,
                              user_id=user['id'],
                              password='')
            self.assertRaises(AssertionError,
                              PROVIDERS.identity_api.authenticate,
                              user_id=user['id'],
                              password=None)
    def test_list_users(self):
        """list_users returns exactly the default fixture users."""
        users = PROVIDERS.identity_api.list_users(
            domain_scope=self._set_domain_scope(
                CONF.identity.default_domain_id))
        self.assertEqual(len(default_fixtures.USERS), len(users))
        user_ids = set(user['id'] for user in users)
        # Fixture users are exposed as self.user_<name> attributes.
        expected_user_ids = set(getattr(self, 'user_%s' % user['name'])['id']
                                for user in default_fixtures.USERS)
        # Passwords must never be exposed by the list call.
        for user_ref in users:
            self.assertNotIn('password', user_ref)
        self.assertEqual(expected_user_ids, user_ids)
def _build_hints(self, hints, filters, fed_dict):
for key in filters:
hints.add_filter(key,
fed_dict[key],
comparator='equals')
return hints
    def _build_fed_resource(self):
        """Create one mapping and two idp/protocol pairs for federation."""
        # create one test mapping, two idps and two protocols for federation
        # test.
        new_mapping = unit.new_mapping_ref()
        PROVIDERS.federation_api.create_mapping(new_mapping['id'], new_mapping)
        # Both protocols share the single mapping created above.
        for idp_id, protocol_id in [('ORG_IDP', 'saml2'),
                                    ('myidp', 'mapped')]:
            new_idp = unit.new_identity_provider_ref(idp_id=idp_id,
                                                     domain_id='default')
            new_protocol = unit.new_protocol_ref(protocol_id=protocol_id,
                                                 idp_id=idp_id,
                                                 mapping_id=new_mapping['id'])
            PROVIDERS.federation_api.create_idp(new_idp['id'], new_idp)
            PROVIDERS.federation_api.create_protocol(new_idp['id'],
                                                     new_protocol['id'],
                                                     new_protocol)
    def _test_list_users_with_attribute(self, filters, fed_dict):
        """List users filtered by the given federated attributes.

        :param filters: names of federated attributes to filter on
        :param fed_dict: federated user reference supplying filter values

        NOTE(review): the filters are re-added to the hints object before
        every list_users call — presumably because drivers may consume
        matched filters; confirm against the driver_hints contract.
        """
        self._build_fed_resource()
        domain = self._get_domain_fixture()
        # Call list_users while no match exists for the federated user
        hints = driver_hints.Hints()
        hints = self._build_hints(hints, filters, fed_dict)
        users = PROVIDERS.identity_api.list_users(hints=hints)
        self.assertEqual(0, len(users))
        # list_users with a new relational user and federated user
        hints = self._build_hints(hints, filters, fed_dict)
        PROVIDERS.shadow_users_api.create_federated_user(
            domain['id'], fed_dict
        )
        users = PROVIDERS.identity_api.list_users(hints=hints)
        self.assertEqual(1, len(users))
        # create another federated user that shouldnt be matched and ensure
        # that still only one match is found
        hints = self._build_hints(hints, filters, fed_dict)
        fed_dict2 = unit.new_federated_user_ref()
        fed_dict2['idp_id'] = 'myidp'
        fed_dict2['protocol_id'] = 'mapped'
        PROVIDERS.shadow_users_api.create_federated_user(
            domain['id'], fed_dict2
        )
        users = PROVIDERS.identity_api.list_users(hints=hints)
        self.assertEqual(1, len(users))
        # create another federated user that should also be matched and ensure
        # that there are now two matches in the users list. Unless there is a
        # unique id in the filter since unique_ids must be unique and would
        # therefore cause a duplicate error.
        hints = self._build_hints(hints, filters, fed_dict)
        if not any('unique_id' in x['name'] for x in hints.filters):
            hints = self._build_hints(hints, filters, fed_dict)
            fed_dict3 = unit.new_federated_user_ref()
            # check which filters are here and create another match
            for filters_ in hints.filters:
                if filters_['name'] == 'idp_id':
                    fed_dict3['idp_id'] = fed_dict['idp_id']
                elif filters_['name'] == 'protocol_id':
                    fed_dict3['protocol_id'] = fed_dict['protocol_id']
            PROVIDERS.shadow_users_api.create_federated_user(
                domain['id'], fed_dict3
            )
            users = PROVIDERS.identity_api.list_users(hints=hints)
            self.assertEqual(2, len(users))
def test_list_users_with_unique_id(self):
federated_dict = unit.new_federated_user_ref()
filters = ['unique_id']
self._test_list_users_with_attribute(filters, federated_dict)
def test_list_users_with_idp_id(self):
federated_dict = unit.new_federated_user_ref()
filters = ['idp_id']
self._test_list_users_with_attribute(filters, federated_dict)
def test_list_users_with_protocol_id(self):
federated_dict = unit.new_federated_user_ref()
filters = ['protocol_id']
self._test_list_users_with_attribute(filters, federated_dict)
def test_list_users_with_unique_id_and_idp_id(self):
federated_dict = unit.new_federated_user_ref()
filters = ['unique_id', 'idp_id']
self._test_list_users_with_attribute(filters, federated_dict)
def test_list_users_with_unique_id_and_protocol_id(self):
federated_dict = unit.new_federated_user_ref()
filters = ['unique_id', 'protocol_id']
self._test_list_users_with_attribute(filters, federated_dict)
def test_list_users_with_idp_id_protocol_id(self):
federated_dict = unit.new_federated_user_ref()
filters = ['idp_id', 'protocol_id']
self._test_list_users_with_attribute(filters, federated_dict)
def test_list_users_with_all_federated_attributes(self):
federated_dict = unit.new_federated_user_ref()
filters = ['unique_id', 'idp_id', 'protocol_id']
self._test_list_users_with_attribute(filters, federated_dict)
def test_list_users_with_name(self):
self._build_fed_resource()
federated_dict_1 = unit.new_federated_user_ref(
display_name='test1@federation.org')
federated_dict_2 = unit.new_federated_user_ref(
display_name='test2@federation.org')
domain = self._get_domain_fixture()
hints = driver_hints.Hints()
hints.add_filter('name', 'test1@federation.org')
users = self.identity_api.list_users(hints=hints)
self.assertEqual(0, len(users))
self.shadow_users_api.create_federated_user(domain['id'],
federated_dict_1)
self.shadow_users_api.create_federated_user(domain['id'],
federated_dict_2)
hints = driver_hints.Hints()
hints.add_filter('name', 'test1@federation.org')
users = self.identity_api.list_users(hints=hints)
self.assertEqual(1, len(users))
hints = driver_hints.Hints()
hints.add_filter('name', 'test1@federation.org')
hints.add_filter('idp_id', 'ORG_IDP')
users = self.identity_api.list_users(hints=hints)
self.assertEqual(1, len(users))
def test_list_groups(self):
group1 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
group2 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
group1 = PROVIDERS.identity_api.create_group(group1)
group2 = PROVIDERS.identity_api.create_group(group2)
groups = PROVIDERS.identity_api.list_groups(
domain_scope=self._set_domain_scope(
CONF.identity.default_domain_id))
self.assertEqual(2, len(groups))
group_ids = []
for group in groups:
group_ids.append(group.get('id'))
self.assertIn(group1['id'], group_ids)
self.assertIn(group2['id'], group_ids)
def test_create_user_doesnt_modify_passed_in_dict(self):
new_user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
original_user = new_user.copy()
PROVIDERS.identity_api.create_user(new_user)
self.assertDictEqual(original_user, new_user)
    def test_update_user_enable(self):
        """update_user toggles 'enabled' and leaves it alone when absent."""
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user = PROVIDERS.identity_api.create_user(user)
        user_ref = PROVIDERS.identity_api.get_user(user['id'])
        self.assertTrue(user_ref['enabled'])
        user['enabled'] = False
        PROVIDERS.identity_api.update_user(user['id'], user)
        user_ref = PROVIDERS.identity_api.get_user(user['id'])
        self.assertEqual(user['enabled'], user_ref['enabled'])
        # If not present, enabled field should not be updated
        del user['enabled']
        PROVIDERS.identity_api.update_user(user['id'], user)
        user_ref = PROVIDERS.identity_api.get_user(user['id'])
        self.assertFalse(user_ref['enabled'])
        user['enabled'] = True
        PROVIDERS.identity_api.update_user(user['id'], user)
        user_ref = PROVIDERS.identity_api.get_user(user['id'])
        self.assertEqual(user['enabled'], user_ref['enabled'])
        # Deleting the key again must similarly preserve enabled=True.
        del user['enabled']
        PROVIDERS.identity_api.update_user(user['id'], user)
        user_ref = PROVIDERS.identity_api.get_user(user['id'])
        self.assertTrue(user_ref['enabled'])
    def test_update_user_name(self):
        """Renaming a user is reflected in both update and get results."""
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user = PROVIDERS.identity_api.create_user(user)
        user_ref = PROVIDERS.identity_api.get_user(user['id'])
        self.assertEqual(user['name'], user_ref['name'])
        changed_name = user_ref['name'] + '_changed'
        user_ref['name'] = changed_name
        updated_user = PROVIDERS.identity_api.update_user(
            user_ref['id'], user_ref
        )
        # NOTE(dstanek): the SQL backend adds an 'extra' field containing a
        # dictionary of the extra fields in addition to the
        # fields in the object. For the details see:
        # SqlIdentity.test_update_project_returns_extra
        updated_user.pop('extra', None)
        self.assertDictEqual(user_ref, updated_user)
        user_ref = PROVIDERS.identity_api.get_user(user_ref['id'])
        self.assertEqual(changed_name, user_ref['name'])
def test_add_user_to_group(self):
domain = self._get_domain_fixture()
new_group = unit.new_group_ref(domain_id=domain['id'])
new_group = PROVIDERS.identity_api.create_group(new_group)
new_user = unit.new_user_ref(domain_id=domain['id'])
new_user = PROVIDERS.identity_api.create_user(new_user)
PROVIDERS.identity_api.add_user_to_group(
new_user['id'], new_group['id']
)
groups = PROVIDERS.identity_api.list_groups_for_user(new_user['id'])
found = False
for x in groups:
if (x['id'] == new_group['id']):
found = True
self.assertTrue(found)
    def test_add_user_to_group_returns_not_found(self):
        """add_user_to_group raises NotFound for a missing user or group."""
        domain = self._get_domain_fixture()
        new_user = unit.new_user_ref(domain_id=domain['id'])
        new_user = PROVIDERS.identity_api.create_user(new_user)
        # Valid user, missing group.
        self.assertRaises(exception.GroupNotFound,
                          PROVIDERS.identity_api.add_user_to_group,
                          new_user['id'],
                          uuid.uuid4().hex)
        new_group = unit.new_group_ref(domain_id=domain['id'])
        new_group = PROVIDERS.identity_api.create_group(new_group)
        # Missing user, valid group.
        self.assertRaises(exception.UserNotFound,
                          PROVIDERS.identity_api.add_user_to_group,
                          uuid.uuid4().hex,
                          new_group['id'])
        # Both missing.
        self.assertRaises(exception.NotFound,
                          PROVIDERS.identity_api.add_user_to_group,
                          uuid.uuid4().hex,
                          uuid.uuid4().hex)
def test_check_user_in_group(self):
domain = self._get_domain_fixture()
new_group = unit.new_group_ref(domain_id=domain['id'])
new_group = PROVIDERS.identity_api.create_group(new_group)
new_user = unit.new_user_ref(domain_id=domain['id'])
new_user = PROVIDERS.identity_api.create_user(new_user)
PROVIDERS.identity_api.add_user_to_group(
new_user['id'], new_group['id']
)
PROVIDERS.identity_api.check_user_in_group(
new_user['id'], new_group['id']
)
def test_check_user_not_in_group(self):
new_group = unit.new_group_ref(
domain_id=CONF.identity.default_domain_id)
new_group = PROVIDERS.identity_api.create_group(new_group)
new_user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
new_user = PROVIDERS.identity_api.create_user(new_user)
self.assertRaises(exception.NotFound,
PROVIDERS.identity_api.check_user_in_group,
new_user['id'],
new_group['id'])
    def test_check_user_in_group_returns_not_found(self):
        """check_user_in_group raises NotFound for missing user or group."""
        new_user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        new_user = PROVIDERS.identity_api.create_user(new_user)
        new_group = unit.new_group_ref(
            domain_id=CONF.identity.default_domain_id)
        new_group = PROVIDERS.identity_api.create_group(new_group)
        # Missing user, valid group.
        self.assertRaises(exception.UserNotFound,
                          PROVIDERS.identity_api.check_user_in_group,
                          uuid.uuid4().hex,
                          new_group['id'])
        # Valid user, missing group.
        self.assertRaises(exception.GroupNotFound,
                          PROVIDERS.identity_api.check_user_in_group,
                          new_user['id'],
                          uuid.uuid4().hex)
        # Both missing.
        self.assertRaises(exception.NotFound,
                          PROVIDERS.identity_api.check_user_in_group,
                          uuid.uuid4().hex,
                          uuid.uuid4().hex)
def test_list_users_in_group(self):
domain = self._get_domain_fixture()
new_group = unit.new_group_ref(domain_id=domain['id'])
new_group = PROVIDERS.identity_api.create_group(new_group)
# Make sure we get an empty list back on a new group, not an error.
user_refs = PROVIDERS.identity_api.list_users_in_group(new_group['id'])
self.assertEqual([], user_refs)
# Make sure we get the correct users back once they have been added
# to the group.
new_user = unit.new_user_ref(domain_id=domain['id'])
new_user = PROVIDERS.identity_api.create_user(new_user)
PROVIDERS.identity_api.add_user_to_group(
new_user['id'], new_group['id']
)
user_refs = PROVIDERS.identity_api.list_users_in_group(new_group['id'])
found = False
for x in user_refs:
if (x['id'] == new_user['id']):
found = True
self.assertNotIn('password', x)
self.assertTrue(found)
def test_list_users_in_group_returns_not_found(self):
self.assertRaises(exception.GroupNotFound,
PROVIDERS.identity_api.list_users_in_group,
uuid.uuid4().hex)
    def test_list_groups_for_user(self):
        """Group counts track add_user_to_group for exactly one user."""
        domain = self._get_domain_fixture()
        test_groups = []
        test_users = []
        GROUP_COUNT = 3
        USER_COUNT = 2
        for x in range(0, USER_COUNT):
            new_user = unit.new_user_ref(domain_id=domain['id'])
            new_user = PROVIDERS.identity_api.create_user(new_user)
            test_users.append(new_user)
        positive_user = test_users[0]
        negative_user = test_users[1]
        # Neither user belongs to any group yet.
        for x in range(0, USER_COUNT):
            group_refs = PROVIDERS.identity_api.list_groups_for_user(
                test_users[x]['id'])
            self.assertEqual(0, len(group_refs))
        for x in range(0, GROUP_COUNT):
            before_count = x
            after_count = x + 1
            new_group = unit.new_group_ref(domain_id=domain['id'])
            new_group = PROVIDERS.identity_api.create_group(new_group)
            test_groups.append(new_group)
            # add the user to the group and ensure that the
            # group count increases by one for each
            group_refs = PROVIDERS.identity_api.list_groups_for_user(
                positive_user['id'])
            self.assertEqual(before_count, len(group_refs))
            PROVIDERS.identity_api.add_user_to_group(
                positive_user['id'],
                new_group['id'])
            group_refs = PROVIDERS.identity_api.list_groups_for_user(
                positive_user['id'])
            self.assertEqual(after_count, len(group_refs))
            # Make sure the group count for the unrelated user did not change
            group_refs = PROVIDERS.identity_api.list_groups_for_user(
                negative_user['id'])
            self.assertEqual(0, len(group_refs))
def test_remove_user_from_group(self):
domain = self._get_domain_fixture()
new_group = unit.new_group_ref(domain_id=domain['id'])
new_group = PROVIDERS.identity_api.create_group(new_group)
new_user = unit.new_user_ref(domain_id=domain['id'])
new_user = PROVIDERS.identity_api.create_user(new_user)
PROVIDERS.identity_api.add_user_to_group(
new_user['id'], new_group['id']
)
groups = PROVIDERS.identity_api.list_groups_for_user(new_user['id'])
self.assertIn(new_group['id'], [x['id'] for x in groups])
PROVIDERS.identity_api.remove_user_from_group(
new_user['id'], new_group['id']
)
groups = PROVIDERS.identity_api.list_groups_for_user(new_user['id'])
self.assertNotIn(new_group['id'], [x['id'] for x in groups])
    def test_remove_user_from_group_returns_not_found(self):
        """remove_user_from_group raises NotFound for missing parties."""
        domain = self._get_domain_fixture()
        new_user = unit.new_user_ref(domain_id=domain['id'])
        new_user = PROVIDERS.identity_api.create_user(new_user)
        new_group = unit.new_group_ref(domain_id=domain['id'])
        new_group = PROVIDERS.identity_api.create_group(new_group)
        # Valid user, missing group.
        self.assertRaises(exception.GroupNotFound,
                          PROVIDERS.identity_api.remove_user_from_group,
                          new_user['id'],
                          uuid.uuid4().hex)
        # Missing user, valid group.
        self.assertRaises(exception.UserNotFound,
                          PROVIDERS.identity_api.remove_user_from_group,
                          uuid.uuid4().hex,
                          new_group['id'])
        # Both missing.
        self.assertRaises(exception.NotFound,
                          PROVIDERS.identity_api.remove_user_from_group,
                          uuid.uuid4().hex,
                          uuid.uuid4().hex)
    def test_group_crud(self):
        """Create, read, update and delete a group via the identity API."""
        domain = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain['id'], domain)
        group = unit.new_group_ref(domain_id=domain['id'])
        group = PROVIDERS.identity_api.create_group(group)
        # The fetched group is a superset of the reference we created.
        group_ref = PROVIDERS.identity_api.get_group(group['id'])
        self.assertLessEqual(group.items(), group_ref.items())
        group['name'] = uuid.uuid4().hex
        PROVIDERS.identity_api.update_group(group['id'], group)
        group_ref = PROVIDERS.identity_api.get_group(group['id'])
        self.assertLessEqual(group.items(), group_ref.items())
        PROVIDERS.identity_api.delete_group(group['id'])
        self.assertRaises(exception.GroupNotFound,
                          PROVIDERS.identity_api.get_group,
                          group['id'])
def test_create_group_name_with_trailing_whitespace(self):
group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
group_name = group['name'] = (group['name'] + ' ')
group_returned = PROVIDERS.identity_api.create_group(group)
self.assertEqual(group_returned['name'], group_name.strip())
def test_update_group_name_with_trailing_whitespace(self):
group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
group_create = PROVIDERS.identity_api.create_group(group)
group_name = group['name'] = (group['name'] + ' ')
group_update = PROVIDERS.identity_api.update_group(
group_create['id'], group
)
self.assertEqual(group_update['id'], group_create['id'])
self.assertEqual(group_update['name'], group_name.strip())
def test_get_group_by_name(self):
group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
group_name = group['name']
group = PROVIDERS.identity_api.create_group(group)
spoiler = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
PROVIDERS.identity_api.create_group(spoiler)
group_ref = PROVIDERS.identity_api.get_group_by_name(
group_name, CONF.identity.default_domain_id)
self.assertDictEqual(group, group_ref)
def test_get_group_by_name_returns_not_found(self):
self.assertRaises(exception.GroupNotFound,
PROVIDERS.identity_api.get_group_by_name,
uuid.uuid4().hex,
CONF.identity.default_domain_id)
    @unit.skip_if_cache_disabled('identity')
    def test_cache_layer_group_crud(self):
        """Verify the get_group cache across create/delete/update."""
        group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        group = PROVIDERS.identity_api.create_group(group)
        # cache the result
        group_ref = PROVIDERS.identity_api.get_group(group['id'])
        # delete the group bypassing identity api.
        domain_id, driver, entity_id = (
            PROVIDERS.identity_api._get_domain_driver_and_entity_id(
                group['id']
            )
        )
        driver.delete_group(entity_id)
        # The stale cached entry is still served...
        self.assertEqual(
            group_ref, PROVIDERS.identity_api.get_group(group['id'])
        )
        # ...until the cache entry is explicitly invalidated.
        PROVIDERS.identity_api.get_group.invalidate(
            PROVIDERS.identity_api, group['id']
        )
        self.assertRaises(exception.GroupNotFound,
                          PROVIDERS.identity_api.get_group, group['id'])
        group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        group = PROVIDERS.identity_api.create_group(group)
        # cache the result
        PROVIDERS.identity_api.get_group(group['id'])
        group['name'] = uuid.uuid4().hex
        group_ref = PROVIDERS.identity_api.update_group(group['id'], group)
        # after updating through identity api, get updated group
        self.assertLessEqual(
            PROVIDERS.identity_api.get_group(group['id']).items(),
            group_ref.items()
        )
def test_create_duplicate_group_name_fails(self):
group1 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
group2 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id,
name=group1['name'])
group1 = PROVIDERS.identity_api.create_group(group1)
self.assertRaises(exception.Conflict,
PROVIDERS.identity_api.create_group,
group2)
def test_create_duplicate_group_name_in_different_domains(self):
new_domain = unit.new_domain_ref()
PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain)
group1 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
group2 = unit.new_group_ref(domain_id=new_domain['id'],
name=group1['name'])
group1 = PROVIDERS.identity_api.create_group(group1)
group2 = PROVIDERS.identity_api.create_group(group2)
def test_move_group_between_domains(self):
domain1 = unit.new_domain_ref()
PROVIDERS.resource_api.create_domain(domain1['id'], domain1)
domain2 = unit.new_domain_ref()
PROVIDERS.resource_api.create_domain(domain2['id'], domain2)
group = unit.new_group_ref(domain_id=domain1['id'])
group = PROVIDERS.identity_api.create_group(group)
group['domain_id'] = domain2['id']
self.assertRaises(exception.ValidationError,
PROVIDERS.identity_api.update_group,
group['id'], group)
    def test_user_crud(self):
        """Create, read, update and delete a user via the identity API."""
        user_dict = unit.new_user_ref(
            domain_id=CONF.identity.default_domain_id)
        del user_dict['id']
        user = PROVIDERS.identity_api.create_user(user_dict)
        user_ref = PROVIDERS.identity_api.get_user(user['id'])
        # Returned refs carry no password, so drop it before the
        # subset comparison below.
        del user_dict['password']
        user_ref_dict = {x: user_ref[x] for x in user_ref}
        self.assertLessEqual(user_dict.items(), user_ref_dict.items())
        user_dict['password'] = uuid.uuid4().hex
        PROVIDERS.identity_api.update_user(user['id'], user_dict)
        user_ref = PROVIDERS.identity_api.get_user(user['id'])
        del user_dict['password']
        user_ref_dict = {x: user_ref[x] for x in user_ref}
        self.assertLessEqual(user_dict.items(), user_ref_dict.items())
        PROVIDERS.identity_api.delete_user(user['id'])
        self.assertRaises(exception.UserNotFound,
                          PROVIDERS.identity_api.get_user,
                          user['id'])
def test_arbitrary_attributes_are_returned_from_create_user(self):
attr_value = uuid.uuid4().hex
user_data = unit.new_user_ref(
domain_id=CONF.identity.default_domain_id,
arbitrary_attr=attr_value)
user = PROVIDERS.identity_api.create_user(user_data)
self.assertEqual(attr_value, user['arbitrary_attr'])
def test_arbitrary_attributes_are_returned_from_get_user(self):
attr_value = uuid.uuid4().hex
user_data = unit.new_user_ref(
domain_id=CONF.identity.default_domain_id,
arbitrary_attr=attr_value)
user_data = PROVIDERS.identity_api.create_user(user_data)
user = PROVIDERS.identity_api.get_user(user_data['id'])
self.assertEqual(attr_value, user['arbitrary_attr'])
def test_new_arbitrary_attributes_are_returned_from_update_user(self):
user_data = unit.new_user_ref(
domain_id=CONF.identity.default_domain_id)
user = PROVIDERS.identity_api.create_user(user_data)
attr_value = uuid.uuid4().hex
user['arbitrary_attr'] = attr_value
updated_user = PROVIDERS.identity_api.update_user(user['id'], user)
self.assertEqual(attr_value, updated_user['arbitrary_attr'])
def test_updated_arbitrary_attributes_are_returned_from_update_user(self):
attr_value = uuid.uuid4().hex
user_data = unit.new_user_ref(
domain_id=CONF.identity.default_domain_id,
arbitrary_attr=attr_value)
new_attr_value = uuid.uuid4().hex
user = PROVIDERS.identity_api.create_user(user_data)
user['arbitrary_attr'] = new_attr_value
updated_user = PROVIDERS.identity_api.update_user(user['id'], user)
self.assertEqual(new_attr_value, updated_user['arbitrary_attr'])
    def test_user_update_and_user_get_return_same_response(self):
        """update_user's return value matches a subsequent get_user."""
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user = PROVIDERS.identity_api.create_user(user)
        updated_user = {'enabled': False}
        updated_user_ref = PROVIDERS.identity_api.update_user(
            user['id'], updated_user)
        # SQL backend adds 'extra' field
        updated_user_ref.pop('extra', None)
        self.assertIs(False, updated_user_ref['enabled'])
        user_ref = PROVIDERS.identity_api.get_user(user['id'])
        self.assertDictEqual(updated_user_ref, user_ref)
    @unit.skip_if_no_multiple_domains_support
    def test_list_domains_filtered_and_limited(self):
        """Exercise list_domains limits, exact and startswith filters."""
        # The test is designed for multiple domains only
        def create_domains(domain_count, domain_name_prefix):
            # Creates domains and records them for later cleanup.
            for _ in range(domain_count):
                domain_name = '%s-%s' % (domain_name_prefix, uuid.uuid4().hex)
                domain = unit.new_domain_ref(name=domain_name)
                self.domain_list[domain_name] = \
                    PROVIDERS.resource_api.create_domain(domain['id'], domain)
        def clean_up_domains():
            # Domains must be disabled before they can be deleted.
            for _, domain in self.domain_list.items():
                domain['enabled'] = False
                PROVIDERS.resource_api.update_domain(domain['id'], domain)
                PROVIDERS.resource_api.delete_domain(domain['id'])
            self.domain_list = {}
        create_domains(2, 'domaingroup1')
        create_domains(3, 'domaingroup2')
        self.addCleanup(clean_up_domains)
        unfiltered_domains = PROVIDERS.resource_api.list_domains()
        # Should get back just 4 entities
        self.config_fixture.config(list_limit=4)
        hints = driver_hints.Hints()
        entities = PROVIDERS.resource_api.list_domains(hints=hints)
        self.assertThat(entities, matchers.HasLength(hints.limit['limit']))
        self.assertTrue(hints.limit['truncated'])
        # Get one exact item from the list
        hints = driver_hints.Hints()
        hints.add_filter('name', unfiltered_domains[3]['name'])
        entities = PROVIDERS.resource_api.list_domains(hints=hints)
        self.assertThat(entities, matchers.HasLength(1))
        self.assertEqual(entities[0], unfiltered_domains[3])
        # Get 2 entries
        hints = driver_hints.Hints()
        hints.add_filter('name', 'domaingroup1', comparator='startswith')
        entities = PROVIDERS.resource_api.list_domains(hints=hints)
        self.assertThat(entities, matchers.HasLength(2))
        self.assertThat(entities[0]['name'],
                        matchers.StartsWith('domaingroup1'))
        self.assertThat(entities[1]['name'],
                        matchers.StartsWith('domaingroup1'))
    @unit.skip_if_no_multiple_domains_support
    def test_list_limit_for_domains(self):
        """list_domains honours each configured resource list_limit."""
        def create_domains(count):
            # Creates domains and records them for later cleanup.
            for _ in range(count):
                domain = unit.new_domain_ref()
                self.domain_list.append(
                    PROVIDERS.resource_api.create_domain(domain['id'], domain))
        def clean_up_domains():
            # Domains must be disabled before they can be deleted.
            for domain in self.domain_list:
                PROVIDERS.resource_api.update_domain(
                    domain['id'], {'enabled': False})
                PROVIDERS.resource_api.delete_domain(domain['id'])
            self.domain_list = []
        create_domains(6)
        self.addCleanup(clean_up_domains)
        # Every limit from 1 to 6 must be reflected in the result length.
        for x in range(1, 7):
            self.config_fixture.config(group='resource', list_limit=x)
            hints = driver_hints.Hints()
            entities = PROVIDERS.resource_api.list_domains(hints=hints)
            self.assertThat(entities, matchers.HasLength(hints.limit['limit']))
class FilterTests(filtering.FilterTests):
    def test_list_entities_filtered(self):
        """An exact name filter returns one entity and is consumed."""
        for entity in ['user', 'group', 'project']:
            # Create 20 entities
            entity_list = self._create_test_data(entity, 20)
            # Try filtering to get one an exact item out of the list
            hints = driver_hints.Hints()
            hints.add_filter('name', entity_list[10]['name'])
            entities = self._list_entities(entity)(hints=hints)
            self.assertEqual(1, len(entities))
            self.assertEqual(entity_list[10]['id'], entities[0]['id'])
            # Check the driver has removed the filter from the list hints
            self.assertFalse(hints.get_exact_filter_by_name('name'))
            self._delete_test_data(entity, entity_list)
    def test_list_users_inexact_filtered(self):
        """Exercise contains/startswith/endswith name filters on users."""
        # Create 20 users, some with specific names. We set the names at create
        # time (rather than updating them), since the LDAP driver does not
        # support name updates.
        user_name_data = {
            # user index: name for user
            5: 'The',
            6: 'The Ministry',
            7: 'The Ministry of',
            8: 'The Ministry of Silly',
            9: 'The Ministry of Silly Walks',
            # ...and one for useful case insensitivity testing
            10: 'The ministry of silly walks OF'
        }
        user_list = self._create_test_data(
            'user', 20, domain_id=CONF.identity.default_domain_id,
            name_dict=user_name_data)
        hints = driver_hints.Hints()
        hints.add_filter('name', 'ministry', comparator='contains')
        users = PROVIDERS.identity_api.list_users(hints=hints)
        self.assertEqual(5, len(users))
        self._match_with_list(users, user_list,
                              list_start=6, list_end=11)
        # TODO(henry-nash) Check inexact filter has been removed.
        hints = driver_hints.Hints()
        hints.add_filter('name', 'The', comparator='startswith')
        users = PROVIDERS.identity_api.list_users(hints=hints)
        self.assertEqual(6, len(users))
        self._match_with_list(users, user_list,
                              list_start=5, list_end=11)
        # TODO(henry-nash) Check inexact filter has been removed.
        hints = driver_hints.Hints()
        hints.add_filter('name', 'of', comparator='endswith')
        users = PROVIDERS.identity_api.list_users(hints=hints)
        self.assertEqual(2, len(users))
        # We can't assume we will get back the users in any particular order
        self.assertIn(user_list[7]['id'], [users[0]['id'], users[1]['id']])
        self.assertIn(user_list[10]['id'], [users[0]['id'], users[1]['id']])
        # TODO(henry-nash) Check inexact filter has been removed.
        # TODO(henry-nash): Add some case sensitive tests. However,
        # these would be hard to validate currently, since:
        #
        # For SQL, the issue is that MySQL 0.7, by default, is installed in
        # case insensitive mode (which is what is run by default for our
        # SQL backend tests). For production deployments. OpenStack
        # assumes a case sensitive database. For these tests, therefore, we
        # need to be able to check the sensitivity of the database so as to
        # know whether to run case sensitive tests here.
        #
        # For LDAP/AD, although dependent on the schema being used, attributes
        # are typically configured to be case aware, but not case sensitive.
        self._delete_test_data('user', user_list)
def _groups_for_user_data(self):
number_of_groups = 10
group_name_data = {
# entity index: name for entity
5: 'The',
6: 'The Ministry',
9: 'The Ministry of Silly Walks',
}
group_list = self._create_test_data(
'group', number_of_groups,
domain_id=CONF.identity.default_domain_id,
name_dict=group_name_data)
user_list = self._create_test_data('user', 2)
for group in range(7):
# Create membership, including with two out of the three groups
# with well know names
PROVIDERS.identity_api.add_user_to_group(
user_list[0]['id'], group_list[group]['id']
)
# ...and some spoiler memberships
for group in range(7, number_of_groups):
PROVIDERS.identity_api.add_user_to_group(
user_list[1]['id'], group_list[group]['id']
)
return group_list, user_list
def test_groups_for_user_inexact_filtered(self):
"""Test use of filtering doesn't break groups_for_user listing.
Some backends may use filtering to achieve the list of groups for a
user, so test that it can combine a second filter.
Test Plan:
- Create 10 groups, some with names we can filter on
- Create 2 users
- Assign 1 of those users to most of the groups, including some of the
well known named ones
- Assign the other user to other groups as spoilers
- Ensure that when we list groups for users with a filter on the group
name, both restrictions have been enforced on what is returned.
"""
group_list, user_list = self._groups_for_user_data()
hints = driver_hints.Hints()
hints.add_filter('name', 'Ministry', comparator='contains')
groups = PROVIDERS.identity_api.list_groups_for_user(
user_list[0]['id'], hints=hints)
# We should only get back one group, since of the two that contain
# 'Ministry' the user only belongs to one.
self.assertThat(len(groups), matchers.Equals(1))
self.assertEqual(group_list[6]['id'], groups[0]['id'])
hints = driver_hints.Hints()
hints.add_filter('name', 'The', comparator='startswith')
groups = PROVIDERS.identity_api.list_groups_for_user(
user_list[0]['id'], hints=hints)
# We should only get back 2 out of the 3 groups that start with 'The'
# hence showing that both "filters" have been applied
self.assertThat(len(groups), matchers.Equals(2))
self.assertIn(group_list[5]['id'], [groups[0]['id'], groups[1]['id']])
self.assertIn(group_list[6]['id'], [groups[0]['id'], groups[1]['id']])
hints.add_filter('name', 'The', comparator='endswith')
groups = PROVIDERS.identity_api.list_groups_for_user(
user_list[0]['id'], hints=hints)
# We should only get back one group since it is the only one that
# ends with 'The'
self.assertThat(len(groups), matchers.Equals(1))
self.assertEqual(group_list[5]['id'], groups[0]['id'])
self._delete_test_data('user', user_list)
self._delete_test_data('group', group_list)
def test_groups_for_user_exact_filtered(self):
"""Test exact filters doesn't break groups_for_user listing."""
group_list, user_list = self._groups_for_user_data()
hints = driver_hints.Hints()
hints.add_filter('name', 'The Ministry', comparator='equals')
groups = PROVIDERS.identity_api.list_groups_for_user(
user_list[0]['id'], hints=hints)
# We should only get back 1 out of the 3 groups with name 'The
# Ministry' hence showing that both "filters" have been applied.
self.assertEqual(1, len(groups))
self.assertEqual(group_list[6]['id'], groups[0]['id'])
self._delete_test_data('user', user_list)
self._delete_test_data('group', group_list)
def _get_user_name_field_size(self):
"""Return the size of the user name field for the backend.
Subclasses can override this method to indicate that the user name
field is limited in length. The user name is the field used in the test
that validates that a filter value works even if it's longer than a
field.
If the backend doesn't limit the value length then return None.
"""
return None
def test_filter_value_wider_than_field(self):
# If a filter value is given that's larger than the field in the
# backend then no values are returned.
user_name_field_size = self._get_user_name_field_size()
if user_name_field_size is None:
# The backend doesn't limit the size of the user name, so pass this
# test.
return
# Create some users just to make sure would return something if the
# filter was ignored.
self._create_test_data('user', 2)
hints = driver_hints.Hints()
value = 'A' * (user_name_field_size + 1)
hints.add_filter('name', value)
users = PROVIDERS.identity_api.list_users(hints=hints)
self.assertEqual([], users)
def _list_users_in_group_data(self):
number_of_users = 10
user_name_data = {
1: 'Arthur Conan Doyle',
3: 'Arthur Rimbaud',
9: 'Arthur Schopenhauer',
}
user_list = self._create_test_data(
'user', number_of_users,
domain_id=CONF.identity.default_domain_id,
name_dict=user_name_data)
group = self._create_one_entity(
'group', CONF.identity.default_domain_id, 'Great Writers')
for i in range(7):
PROVIDERS.identity_api.add_user_to_group(
user_list[i]['id'], group['id']
)
return user_list, group
def test_list_users_in_group_inexact_filtered(self):
user_list, group = self._list_users_in_group_data()
hints = driver_hints.Hints()
hints.add_filter('name', 'Arthur', comparator='contains')
users = PROVIDERS.identity_api.list_users_in_group(
group['id'], hints=hints
)
self.assertThat(len(users), matchers.Equals(2))
self.assertIn(user_list[1]['id'], [users[0]['id'], users[1]['id']])
self.assertIn(user_list[3]['id'], [users[0]['id'], users[1]['id']])
hints = driver_hints.Hints()
hints.add_filter('name', 'Arthur', comparator='startswith')
users = PROVIDERS.identity_api.list_users_in_group(
group['id'], hints=hints
)
self.assertThat(len(users), matchers.Equals(2))
self.assertIn(user_list[1]['id'], [users[0]['id'], users[1]['id']])
self.assertIn(user_list[3]['id'], [users[0]['id'], users[1]['id']])
hints = driver_hints.Hints()
hints.add_filter('name', 'Doyle', comparator='endswith')
users = PROVIDERS.identity_api.list_users_in_group(
group['id'], hints=hints
)
self.assertThat(len(users), matchers.Equals(1))
self.assertEqual(user_list[1]['id'], users[0]['id'])
self._delete_test_data('user', user_list)
self._delete_entity('group')(group['id'])
def test_list_users_in_group_exact_filtered(self):
hints = driver_hints.Hints()
user_list, group = self._list_users_in_group_data()
hints.add_filter('name', 'Arthur Rimbaud', comparator='equals')
users = PROVIDERS.identity_api.list_users_in_group(
group['id'], hints=hints
)
self.assertEqual(1, len(users))
self.assertEqual(user_list[3]['id'], users[0]['id'])
self._delete_test_data('user', user_list)
self._delete_entity('group')(group['id'])
class LimitTests(filtering.FilterTests):
    """Verify that configured list limits truncate entity list calls."""

    # Entity types the limit behaviour is exercised against below.
    ENTITIES = ['user', 'group', 'project']

    def setUp(self):
        """Setup for Limit Test Cases."""
        # NOTE(review): does not call super().setUp(); presumably the
        # concrete test class runs base fixture setup itself -- confirm
        # before changing.
        self.entity_lists = {}
        for entity in self.ENTITIES:
            # Create 20 entities
            self.entity_lists[entity] = self._create_test_data(entity, 20)
        self.addCleanup(self.clean_up_entities)

    def clean_up_entities(self):
        """Clean up entity test data from Limit Test Cases."""
        for entity in self.ENTITIES:
            self._delete_test_data(entity, self.entity_lists[entity])
        del self.entity_lists

    def _test_list_entity_filtered_and_limited(self, entity):
        """List ``entity`` under shrinking limits, then with no hints at all.

        The assertions read ``hints.limit`` after each list call: it is
        never set here, so it must be populated by the list call itself as
        a side effect of applying the configured limit.
        """
        self.config_fixture.config(list_limit=10)
        # Should get back just 10 entities
        hints = driver_hints.Hints()
        entities = self._list_entities(entity)(hints=hints)
        self.assertEqual(hints.limit['limit'], len(entities))
        self.assertTrue(hints.limit['truncated'])
        # Override with driver specific limit
        if entity == 'project':
            self.config_fixture.config(group='resource', list_limit=5)
        else:
            self.config_fixture.config(group='identity', list_limit=5)
        # Should get back just 5 users
        hints = driver_hints.Hints()
        entities = self._list_entities(entity)(hints=hints)
        self.assertEqual(hints.limit['limit'], len(entities))
        # Finally, let's pretend we want to get the full list of entities,
        # even with the limits set, as part of some internal calculation.
        # Calling the API without a hints list should achieve this, and
        # return at least the 20 entries we created (there may be other
        # entities lying around created by other tests/setup).
        entities = self._list_entities(entity)()
        self.assertGreaterEqual(len(entities), 20)
        self._match_with_list(self.entity_lists[entity], entities)

    def test_list_users_filtered_and_limited(self):
        self._test_list_entity_filtered_and_limited('user')

    def test_list_groups_filtered_and_limited(self):
        self._test_list_entity_filtered_and_limited('group')

    def test_list_projects_filtered_and_limited(self):
        self._test_list_entity_filtered_and_limited('project')
|
|
# encoding: utf-8
# Copyright 2013 maker
# License
"""
Project management forms
"""
from django.forms import ModelForm, CharField, TextInput, Form, ModelChoiceField, IntegerField, ChoiceField
from maker.projects.models import Project, Milestone, Task, TaskTimeSlot, TaskStatus
from maker.core.models import Object, ModuleSetting, UpdateRecord
from maker.identities.models import Contact
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from maker.core.decorators import preprocess_form
preprocess_form()
class SettingsForm(Form):
    """
    Administration settings form

    Lets an administrator pick the default TaskStatus for new tasks;
    the choice is persisted via ModuleSetting.
    """
    # Queryset is a placeholder; it is restricted per-user in __init__.
    default_task_status = ModelChoiceField(label='Default Task Status', queryset=[])

    def __init__(self, user, *args, **kwargs):
        "Sets choices and initial value"
        super(SettingsForm, self).__init__(*args, **kwargs)
        # Only offer statuses the acting user may execute ('x' mode).
        self.fields['default_task_status'].queryset = Object.filter_permitted(user,
                                                                              TaskStatus.objects, mode='x')
        try:
            conf = ModuleSetting.get_for_module('maker.projects', 'default_task_status')[0]
            default_task_status = TaskStatus.objects.get(pk=long(conf.value))
            self.fields['default_task_status'].initial = default_task_status.id
        except Exception:
            # No setting stored yet, or it references a deleted status.
            pass

    def save(self):
        "Form processor; returns True on success, False on failure"
        try:
            ModuleSetting.set_for_module('default_task_status',
                                         self.cleaned_data['default_task_status'].id,
                                         'maker.projects')
            # Bug fix: previously fell through and returned None on success,
            # making the result indistinguishable from a falsy failure.
            return True
        except Exception:
            return False
class MassActionForm(Form):
    """ Mass action form for Tasks and Milestones """

    # Querysets are placeholders; they are restricted per-user in __init__.
    status = ModelChoiceField(queryset=[], required=False)
    project = ModelChoiceField(queryset=[], required=False)
    milestone = ModelChoiceField(queryset=[], required=False)
    delete = ChoiceField(label=_("Delete"), choices=(('', '-----'), ('delete', _('Delete Completely')),
                                                     ('trash', _('Move to Trash'))), required=False)

    # Target object the mass action is applied to (set via kwargs).
    instance = None

    def __init__(self, user, *args, **kwargs):
        # Pop the target object before Form.__init__ sees the kwargs.
        if 'instance' in kwargs:
            self.instance = kwargs['instance']
            del kwargs['instance']
        super(MassActionForm, self).__init__(*args, **kwargs)
        # Restrict choices to objects the user may execute against ('x').
        self.fields['status'].queryset = Object.filter_permitted(user, TaskStatus.objects, mode='x')
        self.fields['status'].label = _("Mark as")
        self.fields['project'].queryset = Object.filter_permitted(user, Project.objects, mode='x')
        self.fields['project'].label = _("Move to Project")
        self.fields['milestone'].queryset = Object.filter_permitted(user, Milestone.objects, mode='x')
        self.fields['milestone'].label = _("Move to Milestone")
        # NOTE(review): re-creating 'delete' duplicates the class-level
        # definition; presumably done so the _() labels are evaluated per
        # request rather than at import time -- confirm before removing.
        self.fields['delete'] = ChoiceField(label=_("Delete"), choices=(('', '-----'),
                                            ('delete', _('Delete Completely')),
                                            ('trash', _('Move to Trash'))), required=False)

    def save(self, *args, **kwargs):
        "Save override to omit empty fields"
        # Only the fields the user actually filled in are applied.
        if self.instance:
            if self.is_valid():
                if self.cleaned_data['project']:
                    self.instance.project = self.cleaned_data['project']
                if self.cleaned_data['status']:
                    self.instance.status = self.cleaned_data['status']
                if self.cleaned_data['milestone']:
                    self.instance.milestone = self.cleaned_data['milestone']
                self.instance.save()
                # Deletion (hard or soft) happens after any field updates.
                if self.cleaned_data['delete']:
                    if self.cleaned_data['delete'] == 'delete':
                        self.instance.delete()
                    if self.cleaned_data['delete'] == 'trash':
                        self.instance.trash = True
                        self.instance.save()
class ProjectForm(ModelForm):
    """ Project form """

    name = CharField(widget=TextInput(attrs={'size':'50'}))

    def __init__(self, user, project_id, *args, **kwargs):
        "Limit parent/manager/client choices to what the user may access"
        super(ProjectForm, self).__init__(*args, **kwargs)
        permitted = Object.filter_permitted
        contact_lookup = reverse('identities_ajax_contact_lookup')
        contact_add = reverse('identities_contact_add')
        self.fields['name'].label = _("Name")
        self.fields['parent'].label = _("Parent")
        self.fields['parent'].queryset = permitted(user, Project.objects, mode='x')
        if project_id:
            # Pre-select the parent when adding a sub-project.
            self.fields['parent'].initial = project_id
        self.fields['manager'].label = _("Manager")
        self.fields['manager'].queryset = permitted(user, Contact.objects, mode='x')
        self.fields['manager'].widget.attrs.update({'class': 'autocomplete',
                                                    'callback': contact_lookup,
                                                    'popuplink': contact_add})
        self.fields['client'].label = _("Client")
        self.fields['client'].queryset = permitted(user, Contact.objects, mode='x')
        self.fields['client'].widget.attrs.update({'class': 'autocomplete',
                                                   'callback': contact_lookup,
                                                   'popuplink': contact_add})
        self.fields['details'].label = _("Details")

    class Meta:
        "Project"
        model = Project
        fields = ('name', 'parent', 'manager', 'client', 'details')
class MilestoneForm(ModelForm):
    """ Milestone form """

    name = CharField(widget=TextInput(attrs={'size':'50'}))

    def __init__(self, user, project_id, *args, **kwargs):
        "Configure labels, permitted querysets and the date-picker widgets"
        super(MilestoneForm, self).__init__(*args, **kwargs)
        self.fields['name'].label = _("Name")
        self.fields['project'].label = _("Project")
        self.fields['project'].queryset = Object.filter_permitted(user, Project.objects, mode='x')
        if project_id:
            self.fields['project'].initial = project_id
        self.fields['status'].label = _("Status")
        self.fields['status'].queryset = Object.filter_permitted(user, TaskStatus.objects, mode='x')
        try:
            # Preselect the administrator-configured default status, if any.
            conf = ModuleSetting.get_for_module('maker.projects', 'default_task_status')[0]
            self.fields['status'].initial = long(conf.value)
        except Exception:
            pass
        # Both date fields share the datetimepicker setup; seed the widget
        # with the instance's stored value when editing.
        instance = kwargs.get('instance')
        for field_name, label in (('start_date', _("Start date")),
                                  ('end_date', _("End date"))):
            field = self.fields[field_name]
            field.label = label
            field.widget.attrs.update({'class': 'datetimepicker'})
            current = instance and getattr(instance, field_name)
            if current:
                field.widget.attrs.update({'initial': current.strftime('%s')})
        self.fields['details'].label = _("Details")

    class Meta:
        "Milestone"
        model = Milestone
        fields = ('name', 'project', 'status', 'start_date', 'end_date', 'details')
class TaskForm(ModelForm):
    """ Task form """

    name = CharField(widget=TextInput(attrs={'size':'50'}))

    def __init__(self, user, parent, project_id, milestone_id, *args, **kwargs):
        """Populates form with fields from given Project.

        ``parent`` (a Task or None) pre-populates parent/project/milestone
        when creating a subtask; ``project_id``/``milestone_id`` pre-select
        those fields independently.
        """
        super(TaskForm, self).__init__(*args, **kwargs)
        self.fields['name'].label = _("Name")
        self.fields['name'].widget.attrs.update({'class': 'duplicates',
                                                 'callback': reverse('projects_ajax_task_lookup')})
        self.fields['status'].label = _("Status")
        self.fields['status'].queryset = Object.filter_permitted(user, TaskStatus.objects, mode='x')
        try:
            # Preselect the administrator-configured default status, if any.
            conf = ModuleSetting.get_for_module('maker.projects', 'default_task_status')[0]
            self.fields['status'].initial = long(conf.value)
        except Exception:
            pass
        self.user = user
        self.fields['assigned'].label = _("Assigned")
        self.fields['assigned'].help_text = ""
        self.fields['assigned'].widget.attrs.update({'class': 'multicomplete',
                                                     'callback': reverse('identities_ajax_user_lookup')})
        self.fields['caller'].label = _("Caller")
        self.fields['caller'].queryset = Object.filter_permitted(user, Contact.objects, mode='x')
        if not self.instance.id:
            # New task: default the caller to the acting user's contact.
            contact = user.get_contact()
            if contact:
                self.fields['caller'].initial = contact.id
                self.instance.caller = contact
        self.fields['caller'].widget.attrs.update({'class': 'autocomplete',
                                                   'callback': reverse('identities_ajax_contact_lookup')})
        self.fields['caller'].widget.attrs.update({'popuplink': reverse('identities_contact_add')})
        self.fields['project'].label = _("Project")
        self.fields['project'].queryset = Object.filter_permitted(user, Project.objects, mode='x')
        if project_id:
            self.fields['project'].initial = project_id
        self.fields['milestone'].label = _("Milestone")
        # Fix: this queryset (and 'parent' below) was previously assigned
        # twice with identical values; each is now set exactly once.
        self.fields['milestone'].queryset = Object.filter_permitted(user, Milestone.objects, mode='x')
        if milestone_id:
            self.fields['milestone'].initial = milestone_id
        self.fields['parent'].label = _("Parent")
        self.fields['parent'].queryset = Object.filter_permitted(user, Task.objects, mode='x')
        self.fields['parent'].widget.attrs.update({'class': 'autocomplete',
                                                   'callback': reverse('projects_ajax_task_lookup')})
        self.fields['depends'].label = _("Depends on")
        self.fields['depends'].widget.attrs.update({'class': 'autocomplete',
                                                    'callback': reverse('projects_ajax_task_lookup')})
        self.fields['priority'].label = _("Priority")
        self.fields['priority'].initial = 3
        self.fields['priority'].choices = ((5, _('Highest')), (4, _('High')), (3, _('Normal')), (2, _('Low')), (1, _('Lowest')))
        if parent:
            # Subtask creation: inherit parent, project and milestone.
            self.fields['parent'].initial = parent.id
            self.fields['project'].initial = parent.project_id
            if parent.milestone_id:
                self.fields['milestone'].initial = parent.milestone_id
        # Set datepicker
        self.fields['start_date'].label = _("Start date")
        self.fields['start_date'].widget.attrs.update({'class': 'datetimepicker'})
        self.fields['end_date'].label = _("End date")
        self.fields['end_date'].widget.attrs.update({'class': 'datetimepicker'})
        if 'instance' in kwargs:
            instance = kwargs['instance']
            if instance.start_date:
                self.fields['start_date'].widget.attrs.update({'initial': instance.start_date.strftime('%s')})
            if instance.end_date:
                self.fields['end_date'].widget.attrs.update({'initial': instance.end_date.strftime('%s')})
        self.fields['details'].label = _("Details")
        self.fields['estimated_time'].label = _("Estimated time")
        self.fields['estimated_time'].help_text = _("minutes")

    def old_save(self, *args, **kwargs):
        "Override save to set Subscribers and send Notifications"
        original = None
        original_assigned = []
        if hasattr(self, 'instance'):
            try:
                original = Task.objects.get(pk=self.instance.id)
                original_assigned = list(original.assigned.all())
            except Task.DoesNotExist:
                pass
        instance = super(TaskForm, self).save(*args, **kwargs)
        if original:
            # Subscribe newly assigned users when the assignment changed.
            new_assigned = list(self.cleaned_data['assigned'])
            if original_assigned != new_assigned:
                for assignee in new_assigned:
                    self.instance.subscribers.add(assignee)
        return instance

    class Meta:
        "Task"
        model = Task
        fields = ('name', 'parent', 'depends', 'assigned', 'project', 'milestone', 'caller',
                  'priority', 'status', 'start_date', 'end_date', 'estimated_time', 'details')
class TaskTimeSlotForm(ModelForm):
    """ Task time slot form

    New slots are entered as a duration in minutes; existing slots are
    edited through their stored start/end times.
    """
    minutes = IntegerField(widget=TextInput(attrs={'size':'5'}))

    def __init__(self, user, task_id, *args, **kwargs):
        "Configure labels and widgets and choose minutes vs. time fields"
        super(TaskTimeSlotForm, self).__init__(*args, **kwargs)
        self.fields['time_from'].label = _("Started")
        self.fields['time_to'].label = _("Finished")
        # Set datepicker
        self.fields['time_from'].widget.attrs.update({'class': 'datetimepicker'})
        self.fields['time_to'].widget.attrs.update({'class': 'datetimepicker'})
        if 'instance' in kwargs:
            instance = kwargs['instance']
            if instance.time_from:
                self.fields['time_from'].widget.attrs.update({'initial': instance.time_from.strftime('%s')})
            if instance.time_to:
                self.fields['time_to'].widget.attrs.update({'initial': instance.time_to.strftime('%s')})
        self.fields['minutes'].label = _("Minutes")
        self.fields['details'].label = _("Details")
        self.fields['details'].widget.attrs.update({'class': 'no-editor'})
        if 'instance' in kwargs:
            self.instance = kwargs['instance']
            if self.instance.id:
                # Existing slot: edit stored times, no duration field.
                del self.fields['minutes']
            else:
                del self.fields['time_from']
                del self.fields['time_to']
        else:
            # New slot: the user enters a duration in minutes instead.
            del self.fields['time_from']
            del self.fields['time_to']

    def save(self, *args, **kwargs):
        "Override to auto-set time_from from time_to minus entered minutes"
        if hasattr(self, 'instance') and self.instance.time_to and not self.instance.time_from:
            # timedelta normalizes any number of minutes into days/seconds
            # internally, so the previous manual days/hours splitting (with
            # its confusing >= 1440 threshold) is unnecessary.
            delta = timedelta(minutes=long(self.cleaned_data['minutes']))
            self.instance.time_from = self.instance.time_to - delta
        return super(TaskTimeSlotForm, self).save(*args, **kwargs)

    class Meta:
        "TaskTimeSlot"
        model = TaskTimeSlot
        fields = ('time_from', 'time_to', 'minutes', 'details')
class TaskStatusForm(ModelForm):
    """ TaskStatus form """

    name = CharField(widget=TextInput(attrs={'size':'30'}))

    def __init__(self, user, *args, **kwargs):
        "Apply translated labels to all editable fields"
        super(TaskStatusForm, self).__init__(*args, **kwargs)
        for field_name, label in (('name', _("Name")),
                                  ('active', _("Active")),
                                  ('hidden', _("Hidden")),
                                  ('details', _("Details"))):
            self.fields[field_name].label = label

    class Meta:
        "TaskStatus"
        model = TaskStatus
        fields = ('name', 'active', 'hidden', 'details')
class FilterForm(ModelForm):
    """ Filter form definition

    Builds a task filter form for the given user; any field name listed in
    ``skip`` is removed from the form entirely.
    """

    def __init__(self, user, skip=None, *args, **kwargs):
        "Sets permitted querysets and drops fields named in ``skip``"
        super(FilterForm, self).__init__(*args, **kwargs)
        # Fix: the default used to be the mutable literal [] -- a shared
        # object across all calls; use None and normalize here instead.
        if skip is None:
            skip = ()
        self.fields['caller'].label = _("Caller")
        if 'caller' in skip:
            del self.fields['caller']
        else:
            self.fields['caller'].queryset = Object.filter_permitted(user, Contact.objects, mode='x')
            self.fields['caller'].required = False
            self.fields['caller'].widget.attrs.update({'class': 'autocomplete',
                                                       'callback': reverse('identities_ajax_contact_lookup')})
        self.fields['status'].label = _("Status")
        if 'status' in skip:
            del self.fields['status']
        else:
            self.fields['status'].queryset = Object.filter_permitted(user, TaskStatus.objects, mode='x')
            self.fields['status'].required = False
        self.fields['assigned'].label = _("Assigned")
        self.fields['assigned'].widget.attrs.update({'class': 'multicomplete',
                                                     'callback': reverse('identities_ajax_user_lookup')})
        if 'assigned' in skip:
            del self.fields['assigned']
        else:
            self.fields['assigned'].help_text = ""
        self.fields['project'].label = _("Project")
        if 'project' in skip:
            del self.fields['project']
        else:
            self.fields['project'].queryset = Object.filter_permitted(user, Project.objects, mode='x')
            self.fields['project'].required = False
        self.fields['milestone'].label = _("Milestone")
        if 'milestone' in skip:
            del self.fields['milestone']
        else:
            self.fields['milestone'].queryset = Object.filter_permitted(user, Milestone.objects, mode='x')

    class Meta:
        "FilterForm"
        model = Task
        fields = ('caller', 'status', 'project', 'milestone', 'assigned')
class TaskRecordForm(ModelForm):
    """ TaskRecord form """

    def __init__(self, user, *args, **kwargs):
        "Make the record body mandatory and relabel it"
        super(TaskRecordForm, self).__init__(*args, **kwargs)
        body_field = self.fields['body']
        body_field.required = True
        body_field.label = _("Details")

    class Meta:
        "TaskRecordForm"
        model = UpdateRecord
        fields = ['body']
|
|
# Font Awesome icon class names available for selection.
# Entries appear grouped by the upstream cheatsheet's categories (new
# icons, web application, file types, spinners, form controls, currency,
# text editor, directional, video player, brand, medical), so the same
# icon -- or an alias such as 'fa-automobile'/'fa-car' -- may appear in
# more than one group; the duplicates are intentional.
available = [
    # -- new icons / miscellaneous --
    'fa-automobile',
    'fa-bank',
    'fa-behance',
    'fa-behance-square',
    'fa-bomb',
    'fa-building',
    'fa-cab',
    'fa-car',
    'fa-child',
    'fa-circle-o-notch',
    'fa-circle-thin',
    'fa-codepen',
    'fa-cube',
    'fa-cubes',
    'fa-database',
    'fa-delicious',
    'fa-deviantart',
    'fa-digg',
    'fa-drupal',
    'fa-empire',
    'fa-envelope-square',
    'fa-fax',
    'fa-file-archive-o',
    'fa-file-audio-o',
    'fa-file-code-o',
    'fa-file-excel-o',
    'fa-file-image-o',
    'fa-file-movie-o',
    'fa-file-pdf-o',
    'fa-file-photo-o',
    'fa-file-picture-o',
    'fa-file-powerpoint-o',
    'fa-file-sound-o',
    'fa-file-video-o',
    'fa-file-word-o',
    'fa-file-zip-o',
    'fa-ge',
    'fa-git',
    'fa-git-square',
    'fa-google',
    'fa-graduation-cap',
    'fa-hacker-news',
    'fa-header',
    'fa-history',
    'fa-institution',
    'fa-joomla',
    'fa-jsfiddle',
    'fa-language',
    'fa-life-bouy',
    'fa-life-ring',
    'fa-life-saver',
    'fa-mortar-board',
    'fa-openid',
    'fa-paper-plane',
    'fa-paper-plane-o',
    'fa-paragraph',
    'fa-paw',
    'fa-pied-piper',
    'fa-pied-piper-alt',
    'fa-pied-piper-square',
    'fa-qq',
    'fa-ra',
    'fa-rebel',
    'fa-recycle',
    'fa-reddit',
    'fa-reddit-square',
    'fa-send',
    'fa-send-o',
    'fa-share-alt',
    'fa-share-alt-square',
    'fa-slack',
    'fa-sliders',
    'fa-soundcloud',
    'fa-space-shuttle',
    'fa-spoon',
    'fa-spotify',
    'fa-steam',
    'fa-steam-square',
    'fa-stumbleupon',
    'fa-stumbleupon-circle',
    'fa-support',
    'fa-taxi',
    'fa-tencent-weibo',
    'fa-tree',
    'fa-university',
    'fa-vine',
    'fa-wechat',
    'fa-weixin',
    'fa-wordpress',
    'fa-yahoo',
    # -- web application --
    'fa-adjust',
    'fa-anchor',
    'fa-archive',
    'fa-arrows',
    'fa-arrows-h',
    'fa-arrows-v',
    'fa-asterisk',
    'fa-automobile',
    'fa-ban',
    'fa-bank',
    'fa-bar-chart-o',
    'fa-barcode',
    'fa-bars',
    'fa-beer',
    'fa-bell',
    'fa-bell-o',
    'fa-bolt',
    'fa-bomb',
    'fa-book',
    'fa-bookmark',
    'fa-bookmark-o',
    'fa-briefcase',
    'fa-bug',
    'fa-building',
    'fa-building-o',
    'fa-bullhorn',
    'fa-bullseye',
    'fa-cab',
    'fa-calendar',
    'fa-calendar-o',
    'fa-camera',
    'fa-camera-retro',
    'fa-car',
    'fa-caret-square-o-down',
    'fa-caret-square-o-left',
    'fa-caret-square-o-right',
    'fa-caret-square-o-up',
    'fa-certificate',
    'fa-check',
    'fa-check-circle',
    'fa-check-circle-o',
    'fa-check-square',
    'fa-check-square-o',
    'fa-child',
    'fa-circle',
    'fa-circle-o',
    'fa-circle-o-notch',
    'fa-circle-thin',
    'fa-clock-o',
    'fa-cloud',
    'fa-cloud-download',
    'fa-cloud-upload',
    'fa-code',
    'fa-code-fork',
    'fa-coffee',
    'fa-cog',
    'fa-cogs',
    'fa-comment',
    'fa-comment-o',
    'fa-comments',
    'fa-comments-o',
    'fa-compass',
    'fa-credit-card',
    'fa-crop',
    'fa-crosshairs',
    'fa-cube',
    'fa-cubes',
    'fa-cutlery',
    'fa-dashboard',
    'fa-database',
    'fa-desktop',
    'fa-dot-circle-o',
    'fa-download',
    'fa-edit',
    'fa-ellipsis-h',
    'fa-ellipsis-v',
    'fa-envelope',
    'fa-envelope-o',
    'fa-envelope-square',
    'fa-eraser',
    'fa-exchange',
    'fa-exclamation',
    'fa-exclamation-circle',
    'fa-exclamation-triangle',
    'fa-external-link',
    'fa-external-link-square',
    'fa-eye',
    'fa-eye-slash',
    'fa-fax',
    'fa-female',
    'fa-fighter-jet',
    'fa-file-archive-o',
    'fa-file-audio-o',
    'fa-file-code-o',
    'fa-file-excel-o',
    'fa-file-image-o',
    'fa-file-movie-o',
    'fa-file-pdf-o',
    'fa-file-photo-o',
    'fa-file-picture-o',
    'fa-file-powerpoint-o',
    'fa-file-sound-o',
    'fa-file-video-o',
    'fa-file-word-o',
    'fa-file-zip-o',
    'fa-film',
    'fa-filter',
    'fa-fire',
    'fa-fire-extinguisher',
    'fa-flag',
    'fa-flag-checkered',
    'fa-flag-o',
    'fa-flash',
    'fa-flask',
    'fa-folder',
    'fa-folder-o',
    'fa-folder-open',
    'fa-folder-open-o',
    'fa-frown-o',
    'fa-gamepad',
    'fa-gavel',
    'fa-gear',
    'fa-gears',
    'fa-gift',
    'fa-glass',
    'fa-globe',
    'fa-graduation-cap',
    'fa-group',
    'fa-hdd-o',
    'fa-headphones',
    'fa-heart',
    'fa-heart-o',
    'fa-history',
    'fa-home',
    'fa-image',
    'fa-inbox',
    'fa-info',
    'fa-info-circle',
    'fa-institution',
    'fa-key',
    'fa-keyboard-o',
    'fa-language',
    'fa-laptop',
    'fa-leaf',
    'fa-legal',
    'fa-lemon-o',
    'fa-level-down',
    'fa-level-up',
    'fa-life-bouy',
    'fa-life-ring',
    'fa-life-saver',
    'fa-lightbulb-o',
    'fa-location-arrow',
    'fa-lock',
    'fa-magic',
    'fa-magnet',
    'fa-mail-forward',
    'fa-mail-reply',
    'fa-mail-reply-all',
    'fa-male',
    'fa-map-marker',
    'fa-meh-o',
    'fa-microphone',
    'fa-microphone-slash',
    'fa-minus',
    'fa-minus-circle',
    'fa-minus-square',
    'fa-minus-square-o',
    'fa-mobile',
    'fa-mobile-phone',
    'fa-money',
    'fa-moon-o',
    'fa-mortar-board',
    'fa-music',
    'fa-navicon',
    'fa-paper-plane',
    'fa-paper-plane-o',
    'fa-paw',
    'fa-pencil',
    'fa-pencil-square',
    'fa-pencil-square-o',
    'fa-phone',
    'fa-phone-square',
    'fa-photo',
    'fa-picture-o',
    'fa-plane',
    'fa-plus',
    'fa-plus-circle',
    'fa-plus-square',
    'fa-plus-square-o',
    'fa-power-off',
    'fa-print',
    'fa-puzzle-piece',
    'fa-qrcode',
    'fa-question',
    'fa-question-circle',
    'fa-quote-left',
    'fa-quote-right',
    'fa-random',
    'fa-recycle',
    'fa-refresh',
    'fa-reorder',
    'fa-reply',
    'fa-reply-all',
    'fa-retweet',
    'fa-road',
    'fa-rocket',
    'fa-rss',
    'fa-rss-square',
    'fa-search',
    'fa-search-minus',
    'fa-search-plus',
    'fa-send',
    'fa-send-o',
    'fa-share',
    'fa-share-alt',
    'fa-share-alt-square',
    'fa-share-square',
    'fa-share-square-o',
    'fa-shield',
    'fa-shopping-cart',
    'fa-sign-in',
    'fa-sign-out',
    'fa-signal',
    'fa-sitemap',
    'fa-sliders',
    'fa-smile-o',
    'fa-sort',
    'fa-sort-alpha-asc',
    'fa-sort-alpha-desc',
    'fa-sort-amount-asc',
    'fa-sort-amount-desc',
    'fa-sort-asc',
    'fa-sort-desc',
    'fa-sort-down',
    'fa-sort-numeric-asc',
    'fa-sort-numeric-desc',
    'fa-sort-up',
    'fa-space-shuttle',
    'fa-spinner',
    'fa-spoon',
    'fa-square',
    'fa-square-o',
    'fa-star',
    'fa-star-half',
    'fa-star-half-empty',
    'fa-star-half-full',
    'fa-star-half-o',
    'fa-star-o',
    'fa-suitcase',
    'fa-sun-o',
    'fa-support',
    'fa-tablet',
    'fa-tachometer',
    'fa-tag',
    'fa-tags',
    'fa-tasks',
    'fa-taxi',
    'fa-terminal',
    'fa-thumb-tack',
    'fa-thumbs-down',
    'fa-thumbs-o-down',
    'fa-thumbs-o-up',
    'fa-thumbs-up',
    'fa-ticket',
    'fa-times',
    'fa-times-circle',
    'fa-times-circle-o',
    'fa-tint',
    'fa-toggle-down',
    'fa-toggle-left',
    'fa-toggle-right',
    'fa-toggle-up',
    'fa-trash-o',
    'fa-tree',
    'fa-trophy',
    'fa-truck',
    'fa-umbrella',
    'fa-university',
    'fa-unlock',
    'fa-unlock-alt',
    'fa-unsorted',
    'fa-upload',
    'fa-user',
    'fa-users',
    'fa-video-camera',
    'fa-volume-down',
    'fa-volume-off',
    'fa-volume-up',
    'fa-warning',
    'fa-wheelchair',
    'fa-wrench',
    # -- file types --
    'fa-file',
    'fa-file-archive-o',
    'fa-file-audio-o',
    'fa-file-code-o',
    'fa-file-excel-o',
    'fa-file-image-o',
    'fa-file-movie-o',
    'fa-file-o',
    'fa-file-pdf-o',
    'fa-file-photo-o',
    'fa-file-picture-o',
    'fa-file-powerpoint-o',
    'fa-file-sound-o',
    'fa-file-text',
    'fa-file-text-o',
    'fa-file-video-o',
    'fa-file-word-o',
    'fa-file-zip-o',
    # -- spinners --
    'fa-circle-o-notch',
    'fa-cog',
    'fa-gear',
    'fa-refresh',
    'fa-spinner',
    # -- form controls --
    'fa-check-square',
    'fa-check-square-o',
    'fa-circle',
    'fa-circle-o',
    'fa-dot-circle-o',
    'fa-minus-square',
    'fa-minus-square-o',
    'fa-plus-square',
    'fa-plus-square-o',
    'fa-square',
    'fa-square-o',
    # -- currency --
    'fa-bitcoin',
    'fa-btc',
    'fa-cny',
    'fa-dollar',
    'fa-eur',
    'fa-euro',
    'fa-gbp',
    'fa-inr',
    'fa-jpy',
    'fa-krw',
    'fa-money',
    'fa-rmb',
    'fa-rouble',
    'fa-rub',
    'fa-ruble',
    'fa-rupee',
    'fa-try',
    'fa-turkish-lira',
    'fa-usd',
    'fa-won',
    'fa-yen',
    # -- text editor --
    'fa-align-center',
    'fa-align-justify',
    'fa-align-left',
    'fa-align-right',
    'fa-bold',
    'fa-chain',
    'fa-chain-broken',
    'fa-clipboard',
    'fa-columns',
    'fa-copy',
    'fa-cut',
    'fa-dedent',
    'fa-eraser',
    'fa-file',
    'fa-file-o',
    'fa-file-text',
    'fa-file-text-o',
    'fa-files-o',
    'fa-floppy-o',
    'fa-font',
    'fa-header',
    'fa-indent',
    'fa-italic',
    'fa-link',
    'fa-list',
    'fa-list-alt',
    'fa-list-ol',
    'fa-list-ul',
    'fa-outdent',
    'fa-paperclip',
    'fa-paragraph',
    'fa-paste',
    'fa-repeat',
    'fa-rotate-left',
    'fa-rotate-right',
    'fa-save',
    'fa-scissors',
    'fa-strikethrough',
    'fa-subscript',
    'fa-superscript',
    'fa-table',
    'fa-text-height',
    'fa-text-width',
    'fa-th',
    'fa-th-large',
    'fa-th-list',
    'fa-underline',
    'fa-undo',
    'fa-unlink',
    # -- directional --
    'fa-angle-double-down',
    'fa-angle-double-left',
    'fa-angle-double-right',
    'fa-angle-double-up',
    'fa-angle-down',
    'fa-angle-left',
    'fa-angle-right',
    'fa-angle-up',
    'fa-arrow-circle-down',
    'fa-arrow-circle-left',
    'fa-arrow-circle-o-down',
    'fa-arrow-circle-o-left',
    'fa-arrow-circle-o-right',
    'fa-arrow-circle-o-up',
    'fa-arrow-circle-right',
    'fa-arrow-circle-up',
    'fa-arrow-down',
    'fa-arrow-left',
    'fa-arrow-right',
    'fa-arrow-up',
    'fa-arrows',
    'fa-arrows-alt',
    'fa-arrows-h',
    'fa-arrows-v',
    'fa-caret-down',
    'fa-caret-left',
    'fa-caret-right',
    'fa-caret-square-o-down',
    'fa-caret-square-o-left',
    'fa-caret-square-o-right',
    'fa-caret-square-o-up',
    'fa-caret-up',
    'fa-chevron-circle-down',
    'fa-chevron-circle-left',
    'fa-chevron-circle-right',
    'fa-chevron-circle-up',
    'fa-chevron-down',
    'fa-chevron-left',
    'fa-chevron-right',
    'fa-chevron-up',
    'fa-hand-o-down',
    'fa-hand-o-left',
    'fa-hand-o-right',
    'fa-hand-o-up',
    'fa-long-arrow-down',
    'fa-long-arrow-left',
    'fa-long-arrow-right',
    'fa-long-arrow-up',
    'fa-toggle-down',
    'fa-toggle-left',
    'fa-toggle-right',
    'fa-toggle-up',
    # -- video player --
    'fa-arrows-alt',
    'fa-backward',
    'fa-compress',
    'fa-eject',
    'fa-expand',
    'fa-fast-backward',
    'fa-fast-forward',
    'fa-forward',
    'fa-pause',
    'fa-play',
    'fa-play-circle',
    'fa-play-circle-o',
    'fa-step-backward',
    'fa-step-forward',
    'fa-stop',
    'fa-youtube-play',
    # -- brand --
    'fa-adn',
    'fa-android',
    'fa-apple',
    'fa-behance',
    'fa-behance-square',
    'fa-bitbucket',
    'fa-bitbucket-square',
    'fa-bitcoin',
    'fa-btc',
    'fa-codepen',
    'fa-css3',
    'fa-delicious',
    'fa-deviantart',
    'fa-digg',
    'fa-dribbble',
    'fa-dropbox',
    'fa-drupal',
    'fa-empire',
    'fa-facebook',
    'fa-facebook-square',
    'fa-flickr',
    'fa-foursquare',
    'fa-ge',
    'fa-git',
    'fa-git-square',
    'fa-github',
    'fa-github-alt',
    'fa-github-square',
    'fa-gittip',
    'fa-google',
    'fa-google-plus',
    'fa-google-plus-square',
    'fa-hacker-news',
    'fa-html5',
    'fa-instagram',
    'fa-joomla',
    'fa-jsfiddle',
    'fa-linkedin',
    'fa-linkedin-square',
    'fa-linux',
    'fa-maxcdn',
    'fa-openid',
    'fa-pagelines',
    'fa-pied-piper',
    'fa-pied-piper-alt',
    'fa-pied-piper-square',
    'fa-pinterest',
    'fa-pinterest-square',
    'fa-qq',
    'fa-ra',
    'fa-rebel',
    'fa-reddit',
    'fa-reddit-square',
    'fa-renren',
    'fa-share-alt',
    'fa-share-alt-square',
    'fa-skype',
    'fa-slack',
    'fa-soundcloud',
    'fa-spotify',
    'fa-stack-exchange',
    'fa-stack-overflow',
    'fa-steam',
    'fa-steam-square',
    'fa-stumbleupon',
    'fa-stumbleupon-circle',
    'fa-tencent-weibo',
    'fa-trello',
    'fa-tumblr',
    'fa-tumblr-square',
    'fa-twitter',
    'fa-twitter-square',
    'fa-vimeo-square',
    'fa-vine',
    'fa-vk',
    'fa-wechat',
    'fa-weibo',
    'fa-weixin',
    'fa-windows',
    'fa-wordpress',
    'fa-xing',
    'fa-xing-square',
    'fa-yahoo',
    'fa-youtube',
    'fa-youtube-play',
    'fa-youtube-square',
    # -- medical --
    'fa-ambulance',
    'fa-h-square',
    'fa-hospital-o',
    'fa-medkit',
    'fa-plus-square',
    'fa-stethoscope',
    'fa-user-md',
    'fa-wheelchair'
]
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common settings and connection objects for DigitalOcean Cloud
"""
from libcloud.utils.py3 import httplib
from libcloud.common.base import BaseDriver
from libcloud.common.base import ConnectionUserAndKey, ConnectionKey
from libcloud.common.base import JsonResponse
from libcloud.common.types import InvalidCredsError
__all__ = [
'DigitalOcean_v1_Response',
'DigitalOcean_v1_Connection',
'DigitalOcean_v2_Response',
'DigitalOcean_v2_Connection',
'DigitalOceanBaseDriver'
]
class DigitalOcean_v1_Response(JsonResponse):
    """Response parser for the legacy (v1) DigitalOcean API."""

    def parse_error(self):
        """Extract an error description from a failed v1 response.

        :raises: :class:`InvalidCredsError` when authentication failed.
        :return: Human-readable error string (or raw body) otherwise.
        """
        status = self.status
        if status == httplib.FOUND and '/api/error' in self.body:
            # Hacky, but DigitalOcean error responses are awful
            raise InvalidCredsError(self.body)
        if status == httplib.UNAUTHORIZED:
            raise InvalidCredsError(self.parse_body()['message'])

        parsed = self.parse_body()
        if 'error_message' not in parsed:
            return parsed
        return '%s (code: %s)' % (parsed['error_message'], status)
class DigitalOcean_v1_Connection(ConnectionUserAndKey):
    """
    Connection class for the DigitalOcean (v1) driver.
    """
    host = 'api.digitalocean.com'
    responseCls = DigitalOcean_v1_Response

    def add_default_params(self, params):
        """Attach the credentials required by every v1 request.

        Adds ``client_id`` and ``api_key`` query parameters.
        """
        params.update({'client_id': self.user_id, 'api_key': self.key})
        return params
class DigitalOcean_v2_Response(JsonResponse):
    """Response parser for v2 of the DigitalOcean API."""

    # HTTP statuses the v2 API uses for successful calls.
    valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
                            httplib.NO_CONTENT]

    def parse_error(self):
        """Return an error string; raise on credential failures."""
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError(self.parse_body()['message'])

        parsed = self.parse_body()
        if 'message' not in parsed:
            return parsed
        return '%s (code: %s)' % (parsed['message'], self.status)

    def success(self):
        """A request succeeded iff its status is in the whitelist above."""
        return self.status in self.valid_response_codes
class DigitalOcean_v2_Connection(ConnectionKey):
    """
    Connection class for the DigitalOcean (v2) driver.
    """
    host = 'api.digitalocean.com'
    responseCls = DigitalOcean_v2_Response

    def add_default_headers(self, headers):
        """Attach the headers required by every v2 request.

        Sets the OAuth bearer ``Authorization`` header and the JSON
        content type.
        """
        headers.update({
            'Authorization': 'Bearer %s' % (self.key),
            'Content-Type': 'application/json',
        })
        return headers
class DigitalOceanConnection(DigitalOcean_v2_Connection):
    """Default connection class for the DigitalOcean driver (API v2)."""
class DigitalOceanResponse(DigitalOcean_v2_Response):
    """Default response class for the DigitalOcean driver (API v2)."""
class DigitalOceanBaseDriver(BaseDriver):
    """
    DigitalOcean BaseDriver

    Instantiating this class dispatches to the API-version specific
    subclass: v1 when ``api_version='v1'`` or a ``secret`` is supplied,
    v2 otherwise.
    """
    name = 'DigitalOcean'
    website = 'https://www.digitalocean.com'

    def __new__(cls, key, secret=None, api_version='v2', **kwargs):
        if cls is DigitalOceanBaseDriver:
            if api_version == 'v1' or secret is not None:
                cls = DigitalOcean_v1_BaseDriver
            elif api_version == 'v2':
                cls = DigitalOcean_v2_BaseDriver
            else:
                raise NotImplementedError('Unsupported API version: %s' %
                                          (api_version))
        # Bug fix: object.__new__() accepts no extra arguments in
        # Python 3 when __init__ is overridden, so **kwargs must not be
        # forwarded here; they are consumed by __init__ instead.
        return super(DigitalOceanBaseDriver, cls).__new__(cls)

    def ex_account_info(self):
        """Not available in the base driver; see version subclasses."""
        raise NotImplementedError(
            'ex_account_info not implemented for this driver')

    def ex_list_events(self):
        """Not available in the base driver; see version subclasses."""
        raise NotImplementedError(
            'ex_list_events not implemented for this driver')

    def ex_get_event(self, event_id):
        """Not available in the base driver; see version subclasses."""
        raise NotImplementedError(
            'ex_get_event not implemented for this driver')

    def _paginated_request(self, url, obj):
        """Not available in the base driver; see version subclasses."""
        raise NotImplementedError(
            '_paginated_requests not implemented for this driver')
class DigitalOcean_v1_BaseDriver(DigitalOceanBaseDriver):
    """
    DigitalOcean BaseDriver using v1 of the API.
    """
    connectionCls = DigitalOcean_v1_Connection

    def ex_get_event(self, event_id):
        """
        Fetch a single event object.

        :param event_id: Event id (required)
        :type event_id: ``str``
        """
        response = self.connection.request('/v1/events/%s' % event_id)
        return response.object
class DigitalOcean_v2_BaseDriver(DigitalOceanBaseDriver):
    """
    DigitalOcean BaseDriver using v2 of the API.
    """
    connectionCls = DigitalOcean_v2_Connection

    def ex_account_info(self):
        # Returns the 'account' payload of GET /v2/account.
        return self.connection.request('/v2/account').object['account']

    def ex_list_events(self):
        # Actions are paginated by the API; fetch every page.
        return self._paginated_request('/v2/actions', 'actions')

    def ex_get_event(self, event_id):
        """
        Get an event object

        :param event_id: Event id (required)
        :type event_id: ``str``
        """
        params = {}
        return self.connection.request('/v2/actions/%s' % event_id,
                                       params=params).object['action']

    def _paginated_request(self, url, obj):
        """
        Perform multiple calls in order to have a full list of elements when
        the API responses are paginated.

        :param url: API endpoint
        :type url: ``str``

        :param obj: Result object key
        :type obj: ``str``

        :return: ``list`` of API response objects
        """
        params = {}
        data = self.connection.request(url)
        try:
            # Derive the page count from the 'last' pagination link,
            # e.g. ".../actions?page=N" -> N.  If the response has no
            # 'links' section this raises KeyError, handled below as a
            # single-page result.
            pages = data.object['links']['pages']['last'].split('=')[-1]
            values = data.object[obj]
            # Page 1 is already in `values`; fetch pages 2..N and append
            # their items in order.
            for page in range(2, int(pages) + 1):
                params.update({'page': page})
                new_data = self.connection.request(url, params=params)

                more_values = new_data.object[obj]
                for value in more_values:
                    values.append(value)
            data = values
        except KeyError:  # No pages.
            data = data.object[obj]
        return data
|
|
"""Alexa message handlers."""
import logging
import math
from homeassistant import core as ha
from homeassistant.components import (
camera,
cover,
fan,
group,
input_number,
light,
media_player,
timer,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ENTITY_PICTURE,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_LOCK,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_STOP,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_UNLOCK,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
STATE_ALARM_DISARMED,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers import network
import homeassistant.util.color as color_util
from homeassistant.util.decorator import Registry
import homeassistant.util.dt as dt_util
from homeassistant.util.temperature import convert as convert_temperature
from .const import (
API_TEMP_UNITS,
API_THERMOSTAT_MODES,
API_THERMOSTAT_MODES_CUSTOM,
API_THERMOSTAT_PRESETS,
PERCENTAGE_FAN_MAP,
Cause,
Inputs,
)
from .entities import async_get_entities
from .errors import (
AlexaInvalidDirectiveError,
AlexaInvalidValueError,
AlexaSecurityPanelAuthorizationRequired,
AlexaSecurityPanelUnauthorizedError,
AlexaTempRangeError,
AlexaUnsupportedThermostatModeError,
AlexaVideoActionNotPermittedForContentError,
)
from .state_report import async_enable_proactive_mode
_LOGGER = logging.getLogger(__name__)
HANDLERS = Registry()
@HANDLERS.register(("Alexa.Discovery", "Discover"))
async def async_api_discovery(hass, config, directive, context):
    """Create a API formatted discovery response.

    Async friendly.
    """
    endpoints = []
    for alexa_entity in async_get_entities(hass, config):
        # Only entities the user chose to expose are advertised.
        if not config.should_expose(alexa_entity.entity_id):
            continue
        endpoints.append(alexa_entity.serialize_discovery())

    return directive.response(
        name="Discover.Response",
        namespace="Alexa.Discovery",
        payload={"endpoints": endpoints},
    )
@HANDLERS.register(("Alexa.Authorization", "AcceptGrant"))
async def async_api_accept_grant(hass, config, directive, context):
    """Create a API formatted AcceptGrant response.

    Async friendly.
    """
    auth_code = directive.payload["grant"]["code"]
    _LOGGER.debug("AcceptGrant code: %s", auth_code)

    if config.supports_auth:
        await config.async_accept_grant(auth_code)

        # Proactive state reporting only makes sense once auth succeeded.
        if config.should_report_state:
            await async_enable_proactive_mode(hass, config)

    return directive.response(
        name="AcceptGrant.Response", namespace="Alexa.Authorization", payload={}
    )
@HANDLERS.register(("Alexa.PowerController", "TurnOn"))
async def async_api_turn_on(hass, config, directive, context):
    """Process a turn on request.

    Chooses an "on"-like service appropriate for the entity's domain.
    """
    entity = directive.entity
    # Groups are switched through the homeassistant core domain.
    domain = ha.DOMAIN if entity.domain == group.DOMAIN else entity.domain

    service = SERVICE_TURN_ON
    if domain == cover.DOMAIN:
        service = cover.SERVICE_OPEN_COVER
    elif domain == vacuum.DOMAIN:
        features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Vacuums lacking TURN_ON but supporting START are started instead.
        if not features & vacuum.SUPPORT_TURN_ON and features & vacuum.SUPPORT_START:
            service = vacuum.SERVICE_START
    elif domain == timer.DOMAIN:
        service = timer.SERVICE_START
    elif domain == media_player.DOMAIN:
        features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        power_features = media_player.SUPPORT_TURN_ON | media_player.SUPPORT_TURN_OFF
        # Players without power support get a play command instead.
        if not features & power_features:
            service = media_player.SERVICE_MEDIA_PLAY

    await hass.services.async_call(
        domain,
        service,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.PowerController", "TurnOff"))
async def async_api_turn_off(hass, config, directive, context):
    """Process a turn off request.

    Chooses an "off"-like service appropriate for the entity's domain
    (close cover, return vacuum to base, cancel timer, stop media).
    """
    entity = directive.entity
    domain = entity.domain
    if domain == group.DOMAIN:
        # Groups are switched through the homeassistant core domain.
        domain = ha.DOMAIN

    service = SERVICE_TURN_OFF
    # Consistency fix: test the (possibly remapped) local `domain`, as
    # async_api_turn_on does, instead of `entity.domain`.  Behavior is
    # unchanged because a group entity is never a cover.
    if domain == cover.DOMAIN:
        service = cover.SERVICE_CLOSE_COVER
    elif domain == vacuum.DOMAIN:
        supported = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Vacuums lacking TURN_OFF but supporting RETURN_HOME are sent home.
        if (
            not supported & vacuum.SUPPORT_TURN_OFF
            and supported & vacuum.SUPPORT_RETURN_HOME
        ):
            service = vacuum.SERVICE_RETURN_TO_BASE
    elif domain == timer.DOMAIN:
        service = timer.SERVICE_CANCEL
    elif domain == media_player.DOMAIN:
        supported = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        power_features = media_player.SUPPORT_TURN_ON | media_player.SUPPORT_TURN_OFF
        # Players without power support get a stop command instead.
        if not supported & power_features:
            service = media_player.SERVICE_MEDIA_STOP

    await hass.services.async_call(
        domain,
        service,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.BrightnessController", "SetBrightness"))
async def async_api_set_brightness(hass, config, directive, context):
    """Process a set brightness request."""
    entity = directive.entity
    brightness = int(directive.payload["brightness"])

    service_data = {
        ATTR_ENTITY_ID: entity.entity_id,
        light.ATTR_BRIGHTNESS_PCT: brightness,
    }
    await hass.services.async_call(
        entity.domain, SERVICE_TURN_ON, service_data, blocking=False, context=context
    )
    return directive.response()
@HANDLERS.register(("Alexa.BrightnessController", "AdjustBrightness"))
async def async_api_adjust_brightness(hass, config, directive, context):
    """Process an adjust brightness request.

    Reads the light's current brightness (0-255 attribute), applies the
    requested percentage delta, and turns the light on at the result.
    """
    entity = directive.entity
    brightness_delta = int(directive.payload["brightnessDelta"])

    # Read current state.  A light that is off has no brightness
    # attribute (None), making int() raise TypeError; the original
    # ZeroDivisionError guard could never fire since the divisor is the
    # constant 255.  Default to 0% in either degenerate case.
    try:
        current = math.floor(
            int(entity.attributes.get(light.ATTR_BRIGHTNESS)) / 255 * 100
        )
    except (TypeError, ZeroDivisionError):
        current = 0

    # Clamp at 0 so a large negative delta cannot yield a negative pct.
    brightness = max(0, brightness_delta + current)
    await hass.services.async_call(
        entity.domain,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: entity.entity_id, light.ATTR_BRIGHTNESS_PCT: brightness},
        blocking=False,
        context=context,
    )
    return directive.response()
@HANDLERS.register(("Alexa.ColorController", "SetColor"))
async def async_api_set_color(hass, config, directive, context):
    """Process a set color request."""
    entity = directive.entity
    color = directive.payload["color"]
    # Alexa delivers HSB; Home Assistant's light service wants RGB.
    rgb = color_util.color_hsb_to_RGB(
        float(color["hue"]), float(color["saturation"]), float(color["brightness"])
    )

    await hass.services.async_call(
        entity.domain,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: entity.entity_id, light.ATTR_RGB_COLOR: rgb},
        blocking=False,
        context=context,
    )
    return directive.response()
@HANDLERS.register(("Alexa.ColorTemperatureController", "SetColorTemperature"))
async def async_api_set_color_temperature(hass, config, directive, context):
    """Process a set color temperature request."""
    entity = directive.entity
    kelvin = int(directive.payload["colorTemperatureInKelvin"])

    service_data = {ATTR_ENTITY_ID: entity.entity_id, light.ATTR_KELVIN: kelvin}
    await hass.services.async_call(
        entity.domain, SERVICE_TURN_ON, service_data, blocking=False, context=context
    )
    return directive.response()
@HANDLERS.register(("Alexa.ColorTemperatureController", "DecreaseColorTemperature"))
async def async_api_decrease_color_temp(hass, config, directive, context):
    """Process a decrease color temperature request.

    "Decrease color temperature" means warmer light, i.e. a higher mired
    value; step by 50 capped at the light's maximum.
    """
    entity = directive.entity
    current = int(entity.attributes.get(light.ATTR_COLOR_TEMP))
    ceiling = int(entity.attributes.get(light.ATTR_MAX_MIREDS))
    target = min(ceiling, current + 50)

    service_data = {ATTR_ENTITY_ID: entity.entity_id, light.ATTR_COLOR_TEMP: target}
    await hass.services.async_call(
        entity.domain, SERVICE_TURN_ON, service_data, blocking=False, context=context
    )
    return directive.response()
@HANDLERS.register(("Alexa.ColorTemperatureController", "IncreaseColorTemperature"))
async def async_api_increase_color_temp(hass, config, directive, context):
    """Process an increase color temperature request.

    "Increase color temperature" means cooler light, i.e. a lower mired
    value; step by 50 floored at the light's minimum.
    """
    entity = directive.entity
    current = int(entity.attributes.get(light.ATTR_COLOR_TEMP))
    floor = int(entity.attributes.get(light.ATTR_MIN_MIREDS))
    target = max(floor, current - 50)

    service_data = {ATTR_ENTITY_ID: entity.entity_id, light.ATTR_COLOR_TEMP: target}
    await hass.services.async_call(
        entity.domain, SERVICE_TURN_ON, service_data, blocking=False, context=context
    )
    return directive.response()
@HANDLERS.register(("Alexa.SceneController", "Activate"))
async def async_api_activate(hass, config, directive, context):
    """Process an activate request."""
    entity = directive.entity

    await hass.services.async_call(
        entity.domain,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )

    # Alexa expects a naive-UTC ISO timestamp with a literal 'Z' suffix.
    timestamp = f"{dt_util.utcnow().replace(tzinfo=None).isoformat()}Z"
    return directive.response(
        name="ActivationStarted",
        namespace="Alexa.SceneController",
        payload={"cause": {"type": Cause.VOICE_INTERACTION}, "timestamp": timestamp},
    )
@HANDLERS.register(("Alexa.SceneController", "Deactivate"))
async def async_api_deactivate(hass, config, directive, context):
    """Process a deactivate request."""
    entity = directive.entity

    await hass.services.async_call(
        entity.domain,
        SERVICE_TURN_OFF,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )

    # Alexa expects a naive-UTC ISO timestamp with a literal 'Z' suffix.
    timestamp = f"{dt_util.utcnow().replace(tzinfo=None).isoformat()}Z"
    return directive.response(
        name="DeactivationStarted",
        namespace="Alexa.SceneController",
        payload={"cause": {"type": Cause.VOICE_INTERACTION}, "timestamp": timestamp},
    )
@HANDLERS.register(("Alexa.PercentageController", "SetPercentage"))
async def async_api_set_percentage(hass, config, directive, context):
    """Process a set percentage request."""
    entity = directive.entity
    service = None
    service_data = {ATTR_ENTITY_ID: entity.entity_id}

    if entity.domain == fan.DOMAIN:
        service = fan.SERVICE_SET_SPEED
        percentage = int(directive.payload["percentage"])
        # Bucket the percentage into named fan speeds; values above 100
        # fall through and leave the fan off.
        speed = "off"
        if percentage <= 33:
            speed = "low"
        elif percentage <= 66:
            speed = "medium"
        elif percentage <= 100:
            speed = "high"
        service_data[fan.ATTR_SPEED] = speed

    await hass.services.async_call(
        entity.domain, service, service_data, blocking=False, context=context
    )

    return directive.response()
@HANDLERS.register(("Alexa.PercentageController", "AdjustPercentage"))
async def async_api_adjust_percentage(hass, config, directive, context):
    """Process an adjust percentage request."""
    entity = directive.entity
    delta = int(directive.payload["percentageDelta"])
    service = None
    service_data = {ATTR_ENTITY_ID: entity.entity_id}

    if entity.domain == fan.DOMAIN:
        service = fan.SERVICE_SET_SPEED
        current = PERCENTAGE_FAN_MAP.get(entity.attributes.get(fan.ATTR_SPEED), 100)

        # Apply the delta, clamp at 0, then bucket into named speeds;
        # results above 100 fall through and leave the fan off.
        percentage = max(0, delta + current)
        speed = "off"
        if percentage <= 33:
            speed = "low"
        elif percentage <= 66:
            speed = "medium"
        elif percentage <= 100:
            speed = "high"
        service_data[fan.ATTR_SPEED] = speed

    await hass.services.async_call(
        entity.domain, service, service_data, blocking=False, context=context
    )

    return directive.response()
@HANDLERS.register(("Alexa.LockController", "Lock"))
async def async_api_lock(hass, config, directive, context):
    """Process a lock request."""
    entity = directive.entity
    service_data = {ATTR_ENTITY_ID: entity.entity_id}
    await hass.services.async_call(
        entity.domain, SERVICE_LOCK, service_data, blocking=False, context=context
    )

    # Report the expected resulting state back to Alexa.
    locked_property = {
        "name": "lockState",
        "namespace": "Alexa.LockController",
        "value": "LOCKED",
    }
    response = directive.response()
    response.add_context_property(locked_property)
    return response
@HANDLERS.register(("Alexa.LockController", "Unlock"))
async def async_api_unlock(hass, config, directive, context):
    """Process an unlock request.

    Alexa only permits unlocking in a small set of locales.
    """
    if config.locale not in {"de-DE", "en-US", "ja-JP"}:
        msg = f"The unlock directive is not supported for the following locales: {config.locale}"
        raise AlexaInvalidDirectiveError(msg)

    entity = directive.entity
    await hass.services.async_call(
        entity.domain,
        SERVICE_UNLOCK,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )

    # Report the expected resulting state back to Alexa.
    unlocked_property = {
        "namespace": "Alexa.LockController",
        "name": "lockState",
        "value": "UNLOCKED",
    }
    response = directive.response()
    response.add_context_property(unlocked_property)
    return response
@HANDLERS.register(("Alexa.Speaker", "SetVolume"))
async def async_api_set_volume(hass, config, directive, context):
    """Process a set volume request."""
    entity = directive.entity
    # Alexa sends 0-100; Home Assistant expects a 0.0-1.0 level.
    volume = round(float(directive.payload["volume"] / 100), 2)

    await hass.services.async_call(
        entity.domain,
        SERVICE_VOLUME_SET,
        {
            ATTR_ENTITY_ID: entity.entity_id,
            media_player.const.ATTR_MEDIA_VOLUME_LEVEL: volume,
        },
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.InputController", "SelectInput"))
async def async_api_select_input(hass, config, directive, context):
    """Process a set input request.

    Maps the Alexa-supplied input name onto one of the entity's
    configured sources, then calls media_player's select_source.
    """
    media_input = directive.payload["input"]
    entity = directive.entity

    # Attempt to map the ALL UPPERCASE payload name to a source.
    # Strips trailing 1 to match single input devices.
    source_list = entity.attributes.get(media_player.const.ATTR_INPUT_SOURCE_LIST, [])
    for source in source_list:
        # Normalize both names: lowercase with separators/spaces removed.
        formatted_source = (
            source.lower().replace("-", "").replace("_", "").replace(" ", "")
        )
        media_input = media_input.lower().replace(" ", "")
        if (
            formatted_source in Inputs.VALID_SOURCE_NAME_MAP
            and formatted_source == media_input
        ) or (
            media_input.endswith("1") and formatted_source == media_input.rstrip("1")
        ):
            media_input = source
            break
    else:
        # for/else: no configured source matched the requested input.
        msg = (
            f"failed to map input {media_input} to a media source on {entity.entity_id}"
        )
        raise AlexaInvalidValueError(msg)

    data = {
        ATTR_ENTITY_ID: entity.entity_id,
        media_player.const.ATTR_INPUT_SOURCE: media_input,
    }
    await hass.services.async_call(
        entity.domain,
        media_player.SERVICE_SELECT_SOURCE,
        data,
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.Speaker", "AdjustVolume"))
async def async_api_adjust_volume(hass, config, directive, context):
    """Process an adjust volume request.

    Applies an integer percentage delta to the player's current volume
    level (0.0-1.0 attribute) and sets the clamped result.
    """
    volume_delta = int(directive.payload["volume"])

    entity = directive.entity
    current_level = entity.attributes.get(media_player.const.ATTR_MEDIA_VOLUME_LEVEL)

    # Read current state.  Players without a volume attribute report
    # None, making the multiplication raise TypeError; the original
    # ZeroDivisionError guard could never fire (there is no division).
    # Default to 0 in either degenerate case.
    try:
        current = math.floor(int(current_level * 100))
    except (TypeError, ZeroDivisionError):
        current = 0

    # Clamp at 0 and convert back to a 0.0-1.0 level.
    volume = float(max(0, volume_delta + current) / 100)

    data = {
        ATTR_ENTITY_ID: entity.entity_id,
        media_player.const.ATTR_MEDIA_VOLUME_LEVEL: volume,
    }

    await hass.services.async_call(
        entity.domain, SERVICE_VOLUME_SET, data, blocking=False, context=context
    )

    return directive.response()
@HANDLERS.register(("Alexa.StepSpeaker", "AdjustVolume"))
async def async_api_adjust_volume_step(hass, config, directive, context):
    """Process an adjust volume step request.

    media_player's volume up/down services do not accept a step size
    (each integration configures its own), so the service is invoked
    once per requested step.  When no explicit step count was spoken,
    Alexa sends a default of 10, which is too coarse for most purposes;
    that case is reduced to a single step.
    """
    entity = directive.entity
    volume_int = int(directive.payload["volumeSteps"])
    is_default = bool(directive.payload["volumeStepsDefault"])
    default_steps = 1

    if volume_int < 0:
        service_volume = SERVICE_VOLUME_DOWN
        if is_default:
            volume_int = -default_steps
    else:
        service_volume = SERVICE_VOLUME_UP
        if is_default:
            volume_int = default_steps

    service_data = {ATTR_ENTITY_ID: entity.entity_id}
    for _ in range(abs(volume_int)):
        await hass.services.async_call(
            entity.domain, service_volume, service_data, blocking=False, context=context
        )

    return directive.response()
@HANDLERS.register(("Alexa.StepSpeaker", "SetMute"))
@HANDLERS.register(("Alexa.Speaker", "SetMute"))
async def async_api_set_mute(hass, config, directive, context):
    """Process a set mute request."""
    entity = directive.entity
    muted = bool(directive.payload["mute"])

    await hass.services.async_call(
        entity.domain,
        SERVICE_VOLUME_MUTE,
        {
            ATTR_ENTITY_ID: entity.entity_id,
            media_player.const.ATTR_MEDIA_VOLUME_MUTED: muted,
        },
        blocking=False,
        context=context,
    )

    return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Play"))
async def async_api_play(hass, config, directive, context):
    """Process a play request."""
    entity = directive.entity
    await hass.services.async_call(
        entity.domain,
        SERVICE_MEDIA_PLAY,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )
    return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Pause"))
async def async_api_pause(hass, config, directive, context):
    """Process a pause request."""
    entity = directive.entity
    await hass.services.async_call(
        entity.domain,
        SERVICE_MEDIA_PAUSE,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )
    return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Stop"))
async def async_api_stop(hass, config, directive, context):
    """Process a stop request."""
    entity = directive.entity
    await hass.services.async_call(
        entity.domain,
        SERVICE_MEDIA_STOP,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )
    return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Next"))
async def async_api_next(hass, config, directive, context):
    """Process a next request."""
    entity = directive.entity
    await hass.services.async_call(
        entity.domain,
        SERVICE_MEDIA_NEXT_TRACK,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )
    return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Previous"))
async def async_api_previous(hass, config, directive, context):
    """Process a previous request."""
    entity = directive.entity
    await hass.services.async_call(
        entity.domain,
        SERVICE_MEDIA_PREVIOUS_TRACK,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )
    return directive.response()
def temperature_from_object(hass, temp_obj, interval=False):
    """Get temperature from Temperature object in requested unit.

    :param temp_obj: Alexa temperature dict with ``value`` and ``scale``.
    :param interval: True when the value is a delta, in which case unit
        conversion must not apply absolute offsets.
    """
    to_unit = hass.config.units.temperature_unit
    temp = float(temp_obj["value"])
    scale = temp_obj["scale"]

    if scale == "FAHRENHEIT":
        from_unit = TEMP_FAHRENHEIT
    else:
        from_unit = TEMP_CELSIUS
        if scale == "KELVIN" and not interval:
            # Absolute Kelvin readings are shifted to Celsius first;
            # interval (delta) values need no offset.
            temp -= 273.15

    return convert_temperature(temp, from_unit, to_unit, interval)
@HANDLERS.register(("Alexa.ThermostatController", "SetTargetTemperature"))
async def async_api_set_target_temp(hass, config, directive, context):
    """Process a set target temperature request.

    Supports a single ``targetSetpoint`` as well as a
    ``lowerSetpoint``/``upperSetpoint`` range; every supplied setpoint
    is validated against the entity's min/max and echoed back to Alexa
    as a context property.

    Raises AlexaTempRangeError when a setpoint is out of range.
    """
    entity = directive.entity
    min_temp = entity.attributes.get(climate.ATTR_MIN_TEMP)
    max_temp = entity.attributes.get(climate.ATTR_MAX_TEMP)
    unit = hass.config.units.temperature_unit

    # Service payload is accumulated across the setpoint branches below.
    data = {ATTR_ENTITY_ID: entity.entity_id}

    payload = directive.payload
    response = directive.response()
    if "targetSetpoint" in payload:
        temp = temperature_from_object(hass, payload["targetSetpoint"])
        if temp < min_temp or temp > max_temp:
            raise AlexaTempRangeError(hass, temp, min_temp, max_temp)
        data[ATTR_TEMPERATURE] = temp
        response.add_context_property(
            {
                "name": "targetSetpoint",
                "namespace": "Alexa.ThermostatController",
                "value": {"value": temp, "scale": API_TEMP_UNITS[unit]},
            }
        )
    if "lowerSetpoint" in payload:
        temp_low = temperature_from_object(hass, payload["lowerSetpoint"])
        if temp_low < min_temp or temp_low > max_temp:
            raise AlexaTempRangeError(hass, temp_low, min_temp, max_temp)
        data[climate.ATTR_TARGET_TEMP_LOW] = temp_low
        response.add_context_property(
            {
                "name": "lowerSetpoint",
                "namespace": "Alexa.ThermostatController",
                "value": {"value": temp_low, "scale": API_TEMP_UNITS[unit]},
            }
        )
    if "upperSetpoint" in payload:
        temp_high = temperature_from_object(hass, payload["upperSetpoint"])
        if temp_high < min_temp or temp_high > max_temp:
            raise AlexaTempRangeError(hass, temp_high, min_temp, max_temp)
        data[climate.ATTR_TARGET_TEMP_HIGH] = temp_high
        response.add_context_property(
            {
                "name": "upperSetpoint",
                "namespace": "Alexa.ThermostatController",
                "value": {"value": temp_high, "scale": API_TEMP_UNITS[unit]},
            }
        )

    await hass.services.async_call(
        entity.domain,
        climate.SERVICE_SET_TEMPERATURE,
        data,
        blocking=False,
        context=context,
    )

    return response
@HANDLERS.register(("Alexa.ThermostatController", "AdjustTargetTemperature"))
async def async_api_adjust_target_temp(hass, config, directive, context):
    """Process an adjust target temperature request."""
    entity = directive.entity
    min_temp = entity.attributes.get(climate.ATTR_MIN_TEMP)
    max_temp = entity.attributes.get(climate.ATTR_MAX_TEMP)
    unit = hass.config.units.temperature_unit

    # The delta is an interval, so conversion must not apply offsets.
    temp_delta = temperature_from_object(
        hass, directive.payload["targetSetpointDelta"], interval=True
    )
    target_temp = float(entity.attributes.get(ATTR_TEMPERATURE)) + temp_delta

    if target_temp < min_temp or target_temp > max_temp:
        raise AlexaTempRangeError(hass, target_temp, min_temp, max_temp)

    response = directive.response()
    await hass.services.async_call(
        entity.domain,
        climate.SERVICE_SET_TEMPERATURE,
        {ATTR_ENTITY_ID: entity.entity_id, ATTR_TEMPERATURE: target_temp},
        blocking=False,
        context=context,
    )

    response.add_context_property(
        {
            "name": "targetSetpoint",
            "namespace": "Alexa.ThermostatController",
            "value": {"value": target_temp, "scale": API_TEMP_UNITS[unit]},
        }
    )

    return response
@HANDLERS.register(("Alexa.ThermostatController", "SetThermostatMode"))
async def async_api_set_thermostat_mode(hass, config, directive, context):
    """Process a set thermostat mode request.

    Resolves the Alexa mode in one of three ways: a preset (e.g. ECO),
    a CUSTOM mode carrying its own name, or a standard HVAC mode.

    Raises AlexaUnsupportedThermostatModeError when the resolved mode is
    not offered by the entity.
    """
    entity = directive.entity
    # Alexa may send the mode as a plain string or as {"value": ...}.
    mode = directive.payload["thermostatMode"]
    mode = mode if isinstance(mode, str) else mode["value"]

    data = {ATTR_ENTITY_ID: entity.entity_id}

    # Reverse-lookup: find the HA preset whose Alexa name matches.
    ha_preset = next((k for k, v in API_THERMOSTAT_PRESETS.items() if v == mode), None)

    if ha_preset:
        presets = entity.attributes.get(climate.ATTR_PRESET_MODES, [])

        if ha_preset not in presets:
            msg = f"The requested thermostat mode {ha_preset} is not supported"
            raise AlexaUnsupportedThermostatModeError(msg)

        service = climate.SERVICE_SET_PRESET_MODE
        data[climate.ATTR_PRESET_MODE] = ha_preset

    elif mode == "CUSTOM":
        operation_list = entity.attributes.get(climate.ATTR_HVAC_MODES)
        custom_mode = directive.payload["thermostatMode"]["customName"]
        # Map the Alexa custom name back to the HA HVAC mode.
        custom_mode = next(
            (k for k, v in API_THERMOSTAT_MODES_CUSTOM.items() if v == custom_mode),
            None,
        )

        if custom_mode not in operation_list:
            msg = (
                f"The requested thermostat mode {mode}: {custom_mode} is not supported"
            )
            raise AlexaUnsupportedThermostatModeError(msg)

        service = climate.SERVICE_SET_HVAC_MODE
        data[climate.ATTR_HVAC_MODE] = custom_mode

    else:
        operation_list = entity.attributes.get(climate.ATTR_HVAC_MODES)
        # Several HA modes can map to one Alexa mode; pick one the
        # entity actually supports.
        ha_modes = {k: v for k, v in API_THERMOSTAT_MODES.items() if v == mode}
        ha_mode = next(iter(set(ha_modes).intersection(operation_list)), None)

        if ha_mode not in operation_list:
            msg = f"The requested thermostat mode {mode} is not supported"
            raise AlexaUnsupportedThermostatModeError(msg)

        service = climate.SERVICE_SET_HVAC_MODE
        data[climate.ATTR_HVAC_MODE] = ha_mode

    response = directive.response()
    await hass.services.async_call(
        climate.DOMAIN, service, data, blocking=False, context=context
    )
    response.add_context_property(
        {
            "name": "thermostatMode",
            "namespace": "Alexa.ThermostatController",
            "value": mode,
        }
    )

    return response
@HANDLERS.register(("Alexa", "ReportState"))
async def async_api_reportstate(hass, config, directive, context):
    """Process a ReportState request.

    Only the StateReport envelope is produced here; the context
    properties are filled in elsewhere from the entity's current state.
    """
    return directive.response(name="StateReport")
@HANDLERS.register(("Alexa.PowerLevelController", "SetPowerLevel"))
async def async_api_set_power_level(hass, config, directive, context):
    """Process a SetPowerLevel request."""
    entity = directive.entity
    service = None
    service_data = {ATTR_ENTITY_ID: entity.entity_id}

    if entity.domain == fan.DOMAIN:
        service = fan.SERVICE_SET_SPEED
        level = int(directive.payload["powerLevel"])
        # Bucket the 0-100 power level into named fan speeds.
        if level <= 33:
            speed = "low"
        elif level <= 66:
            speed = "medium"
        else:
            speed = "high"
        service_data[fan.ATTR_SPEED] = speed

    await hass.services.async_call(
        entity.domain, service, service_data, blocking=False, context=context
    )

    return directive.response()
@HANDLERS.register(("Alexa.PowerLevelController", "AdjustPowerLevel"))
async def async_api_adjust_power_level(hass, config, directive, context):
    """Process an AdjustPowerLevel request."""
    entity = directive.entity
    delta = int(directive.payload["powerLevelDelta"])
    service = None
    service_data = {ATTR_ENTITY_ID: entity.entity_id}

    if entity.domain == fan.DOMAIN:
        service = fan.SERVICE_SET_SPEED
        current_speed = entity.attributes.get(fan.ATTR_SPEED)
        current = PERCENTAGE_FAN_MAP.get(current_speed, 100)

        # Apply the delta, clamp at 0, then bucket into named speeds.
        level = max(0, delta + current)
        if level <= 33:
            speed = "low"
        elif level <= 66:
            speed = "medium"
        else:
            speed = "high"
        service_data[fan.ATTR_SPEED] = speed

    await hass.services.async_call(
        entity.domain, service, service_data, blocking=False, context=context
    )

    return directive.response()
@HANDLERS.register(("Alexa.SecurityPanelController", "Arm"))
async def async_api_arm(hass, config, directive, context):
    """Process a Security Panel Arm request."""
    entity = directive.entity
    arm_state = directive.payload["armState"]
    service_data = {ATTR_ENTITY_ID: entity.entity_id}

    # Alexa requires the panel to be disarmed before re-arming.
    if entity.state != STATE_ALARM_DISARMED:
        msg = "You must disarm the system before you can set the requested arm state."
        raise AlexaSecurityPanelAuthorizationRequired(msg)

    service = None
    if arm_state == "ARMED_AWAY":
        service = SERVICE_ALARM_ARM_AWAY
    elif arm_state == "ARMED_NIGHT":
        service = SERVICE_ALARM_ARM_NIGHT
    elif arm_state == "ARMED_STAY":
        service = SERVICE_ALARM_ARM_HOME

    await hass.services.async_call(
        entity.domain, service, service_data, blocking=False, context=context
    )

    # return 0 until alarm integration supports an exit delay
    response = directive.response(
        name="Arm.Response",
        namespace="Alexa.SecurityPanelController",
        payload={"exitDelayInSeconds": 0},
    )

    response.add_context_property(
        {
            "name": "armState",
            "namespace": "Alexa.SecurityPanelController",
            "value": arm_state,
        }
    )

    return response
@HANDLERS.register(("Alexa.SecurityPanelController", "Disarm"))
async def async_api_disarm(hass, config, directive, context):
    """Process a Security Panel Disarm request.

    Forwards an optional four-digit PIN to the alarm service and reports
    the DISARMED state back to Alexa on success.
    """
    entity = directive.entity
    data = {ATTR_ENTITY_ID: entity.entity_id}
    response = directive.response()

    # Per Alexa Documentation: If you receive a Disarm directive, and the system is already disarmed,
    # respond with a success response, not an error response.
    if entity.state == STATE_ALARM_DISARMED:
        return response

    payload = directive.payload
    if "authorization" in payload:
        value = payload["authorization"]["value"]
        if payload["authorization"]["type"] == "FOUR_DIGIT_PIN":
            data["code"] = value

    # Blocking call so a rejected code surfaces here as a falsy result.
    if not await hass.services.async_call(
        entity.domain, SERVICE_ALARM_DISARM, data, blocking=True, context=context
    ):
        msg = "Invalid Code"
        raise AlexaSecurityPanelUnauthorizedError(msg)

    response.add_context_property(
        {
            "name": "armState",
            "namespace": "Alexa.SecurityPanelController",
            "value": "DISARMED",
        }
    )

    return response
@HANDLERS.register(("Alexa.ModeController", "SetMode"))
async def async_api_set_mode(hass, config, directive, context):
    """Process a SetMode directive.

    The capability instance (e.g. ``fan.direction``) selects which
    service is called; the mode payload carries the instance-qualified
    value (e.g. ``direction.forward``).

    Raises AlexaInvalidDirectiveError for unsupported instances.
    """
    entity = directive.entity
    instance = directive.instance
    domain = entity.domain
    service = None
    data = {ATTR_ENTITY_ID: entity.entity_id}
    mode = directive.payload["mode"]

    # Fan Direction
    if instance == f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}":
        # Mode is "<attribute>.<value>"; keep only the value part.
        _, direction = mode.split(".")
        if direction in (fan.DIRECTION_REVERSE, fan.DIRECTION_FORWARD):
            service = fan.SERVICE_SET_DIRECTION
            data[fan.ATTR_DIRECTION] = direction

    # Cover Position
    elif instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
        _, position = mode.split(".")

        if position == cover.STATE_CLOSED:
            service = cover.SERVICE_CLOSE_COVER
        elif position == cover.STATE_OPEN:
            service = cover.SERVICE_OPEN_COVER
        elif position == "custom":
            # "custom" halts the cover at its current position.
            service = cover.SERVICE_STOP_COVER

    else:
        msg = "Entity does not support directive"
        raise AlexaInvalidDirectiveError(msg)

    await hass.services.async_call(
        domain, service, data, blocking=False, context=context
    )

    response = directive.response()
    response.add_context_property(
        {
            "namespace": "Alexa.ModeController",
            "instance": instance,
            "name": "mode",
            "value": mode,
        }
    )

    return response
@HANDLERS.register(("Alexa.ModeController", "AdjustMode"))
async def async_api_adjust_mode(hass, config, directive, context):
    """Process a AdjustMode request.

    Requires capabilityResources supportedModes to be ordered.
    Only supportedModes with ordered=True support the adjustMode directive.
    """
    # Currently no supportedModes are configured with ordered=True to support this request.
    raise AlexaInvalidDirectiveError("Entity does not support directive")
@HANDLERS.register(("Alexa.ToggleController", "TurnOn"))
async def async_api_toggle_on(hass, config, directive, context):
    """Process a toggle on request."""
    entity = directive.entity
    instance = directive.instance

    # Fan oscillation is the only toggle capability handled here.
    if instance != f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}":
        raise AlexaInvalidDirectiveError("Entity does not support directive")

    service_data = {
        ATTR_ENTITY_ID: entity.entity_id,
        fan.ATTR_OSCILLATING: True,
    }
    await hass.services.async_call(
        entity.domain, fan.SERVICE_OSCILLATE, service_data, blocking=False, context=context
    )

    response = directive.response()
    response.add_context_property(
        {
            "namespace": "Alexa.ToggleController",
            "instance": instance,
            "name": "toggleState",
            "value": "ON",
        }
    )
    return response
@HANDLERS.register(("Alexa.ToggleController", "TurnOff"))
async def async_api_toggle_off(hass, config, directive, context):
    """Process a toggle off request."""
    entity = directive.entity
    instance = directive.instance

    # Fan oscillation is the only toggle capability handled here.
    if instance != f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}":
        raise AlexaInvalidDirectiveError("Entity does not support directive")

    service_data = {
        ATTR_ENTITY_ID: entity.entity_id,
        fan.ATTR_OSCILLATING: False,
    }
    await hass.services.async_call(
        entity.domain, fan.SERVICE_OSCILLATE, service_data, blocking=False, context=context
    )

    response = directive.response()
    response.add_context_property(
        {
            "namespace": "Alexa.ToggleController",
            "instance": instance,
            "name": "toggleState",
            "value": "OFF",
        }
    )
    return response
@HANDLERS.register(("Alexa.RangeController", "SetRangeValue"))
async def async_api_set_range(hass, config, directive, context):
    """Process a next request."""
    # Translates an absolute Alexa rangeValue into the matching HA service
    # call per capability instance (fan speed, cover position/tilt,
    # input_number value, vacuum fan speed).
    entity = directive.entity
    instance = directive.instance
    domain = entity.domain
    service = None
    data = {ATTR_ENTITY_ID: entity.entity_id}
    range_value = directive.payload["rangeValue"]

    # Fan Speed
    if instance == f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
        range_value = int(range_value)
        service = fan.SERVICE_SET_SPEED
        # The range value is an index into the entity's speed list.
        speed_list = entity.attributes[fan.ATTR_SPEED_LIST]
        speed = next((v for i, v in enumerate(speed_list) if i == range_value), None)
        if not speed:
            msg = "Entity does not support value"
            raise AlexaInvalidValueError(msg)
        # Index 0 maps to "off", which uses the dedicated turn_off service.
        if speed == fan.SPEED_OFF:
            service = fan.SERVICE_TURN_OFF
        data[fan.ATTR_SPEED] = speed

    # Cover Position
    elif instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
        range_value = int(range_value)
        # 0/100 use the dedicated close/open services; anything else sets
        # an explicit position.
        if range_value == 0:
            service = cover.SERVICE_CLOSE_COVER
        elif range_value == 100:
            service = cover.SERVICE_OPEN_COVER
        else:
            service = cover.SERVICE_SET_COVER_POSITION
            data[cover.ATTR_POSITION] = range_value

    # Cover Tilt
    elif instance == f"{cover.DOMAIN}.tilt":
        range_value = int(range_value)
        if range_value == 0:
            service = cover.SERVICE_CLOSE_COVER_TILT
        elif range_value == 100:
            service = cover.SERVICE_OPEN_COVER_TILT
        else:
            service = cover.SERVICE_SET_COVER_TILT_POSITION
            data[cover.ATTR_TILT_POSITION] = range_value

    # Input Number Value
    elif instance == f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}":
        range_value = float(range_value)
        service = input_number.SERVICE_SET_VALUE
        # Clamp to the entity's configured min/max bounds.
        min_value = float(entity.attributes[input_number.ATTR_MIN])
        max_value = float(entity.attributes[input_number.ATTR_MAX])
        data[input_number.ATTR_VALUE] = min(max_value, max(min_value, range_value))

    # Vacuum Fan Speed
    elif instance == f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}":
        service = vacuum.SERVICE_SET_FAN_SPEED
        # The range value indexes the vacuum's fan speed list.
        speed_list = entity.attributes[vacuum.ATTR_FAN_SPEED_LIST]
        speed = next(
            (v for i, v in enumerate(speed_list) if i == int(range_value)), None
        )
        if not speed:
            msg = "Entity does not support value"
            raise AlexaInvalidValueError(msg)
        data[vacuum.ATTR_FAN_SPEED] = speed

    else:
        msg = "Entity does not support directive"
        raise AlexaInvalidDirectiveError(msg)

    await hass.services.async_call(
        domain, service, data, blocking=False, context=context
    )
    response = directive.response()
    response.add_context_property(
        {
            "namespace": "Alexa.RangeController",
            "instance": instance,
            "name": "rangeValue",
            "value": range_value,
        }
    )
    return response
@HANDLERS.register(("Alexa.RangeController", "AdjustRangeValue"))
async def async_api_adjust_range(hass, config, directive, context):
    """Process a next request.

    Adjusts the entity's current range value by the requested delta and
    reports the resulting value back to Alexa.

    Raises:
        AlexaInvalidValueError: if the current value cannot be determined.
        AlexaInvalidDirectiveError: if the capability instance is unsupported.
    """
    entity = directive.entity
    instance = directive.instance
    domain = entity.domain
    service = None
    data = {ATTR_ENTITY_ID: entity.entity_id}
    range_delta = directive.payload["rangeValueDelta"]
    range_delta_default = bool(directive.payload["rangeValueDeltaDefault"])
    response_value = 0

    # Fan Speed
    if instance == f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
        range_delta = int(range_delta)
        service = fan.SERVICE_SET_SPEED
        # Step through the entity's speed list, clamped to its bounds.
        speed_list = entity.attributes[fan.ATTR_SPEED_LIST]
        current_speed = entity.attributes[fan.ATTR_SPEED]
        current_speed_index = next(
            (i for i, v in enumerate(speed_list) if v == current_speed), 0
        )
        new_speed_index = min(
            len(speed_list) - 1, max(0, current_speed_index + range_delta)
        )
        speed = next(
            (v for i, v in enumerate(speed_list) if i == new_speed_index), None
        )
        if speed == fan.SPEED_OFF:
            service = fan.SERVICE_TURN_OFF
        data[fan.ATTR_SPEED] = response_value = speed

    # Cover Position
    elif instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
        # A "default" delta means an unspecified amount; scale to 20 points.
        range_delta = int(range_delta * 20) if range_delta_default else int(range_delta)
        service = SERVICE_SET_COVER_POSITION
        current = entity.attributes.get(cover.ATTR_POSITION)
        # Bug fix: 0 is a valid position — only a missing attribute (None)
        # means the current position is unknown.
        if current is None:
            msg = f"Unable to determine {entity.entity_id} current position"
            raise AlexaInvalidValueError(msg)
        position = response_value = min(100, max(0, range_delta + current))
        if position == 100:
            service = cover.SERVICE_OPEN_COVER
        elif position == 0:
            service = cover.SERVICE_CLOSE_COVER
        else:
            data[cover.ATTR_POSITION] = position

    # Cover Tilt
    elif instance == f"{cover.DOMAIN}.tilt":
        range_delta = int(range_delta * 20) if range_delta_default else int(range_delta)
        service = SERVICE_SET_COVER_TILT_POSITION
        current = entity.attributes.get(cover.ATTR_TILT_POSITION)
        # Bug fix: 0 is a valid tilt position — only None means unknown.
        if current is None:
            msg = f"Unable to determine {entity.entity_id} current tilt position"
            raise AlexaInvalidValueError(msg)
        tilt_position = response_value = min(100, max(0, range_delta + current))
        if tilt_position == 100:
            service = cover.SERVICE_OPEN_COVER_TILT
        elif tilt_position == 0:
            service = cover.SERVICE_CLOSE_COVER_TILT
        else:
            data[cover.ATTR_TILT_POSITION] = tilt_position

    # Input Number Value
    elif instance == f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}":
        range_delta = float(range_delta)
        service = input_number.SERVICE_SET_VALUE
        # Clamp the adjusted value to the entity's configured min/max.
        min_value = float(entity.attributes[input_number.ATTR_MIN])
        max_value = float(entity.attributes[input_number.ATTR_MAX])
        current = float(entity.state)
        data[input_number.ATTR_VALUE] = response_value = min(
            max_value, max(min_value, range_delta + current)
        )

    # Vacuum Fan Speed
    elif instance == f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}":
        range_delta = int(range_delta)
        service = vacuum.SERVICE_SET_FAN_SPEED
        # Step through the vacuum's fan speed list, clamped to its bounds.
        speed_list = entity.attributes[vacuum.ATTR_FAN_SPEED_LIST]
        current_speed = entity.attributes[vacuum.ATTR_FAN_SPEED]
        current_speed_index = next(
            (i for i, v in enumerate(speed_list) if v == current_speed), 0
        )
        new_speed_index = min(
            len(speed_list) - 1, max(0, current_speed_index + range_delta)
        )
        speed = next(
            (v for i, v in enumerate(speed_list) if i == new_speed_index), None
        )
        data[vacuum.ATTR_FAN_SPEED] = response_value = speed

    else:
        msg = "Entity does not support directive"
        raise AlexaInvalidDirectiveError(msg)

    await hass.services.async_call(
        domain, service, data, blocking=False, context=context
    )
    response = directive.response()
    response.add_context_property(
        {
            "namespace": "Alexa.RangeController",
            "instance": instance,
            "name": "rangeValue",
            "value": response_value,
        }
    )
    return response
@HANDLERS.register(("Alexa.ChannelController", "ChangeChannel"))
async def async_api_changechannel(hass, config, directive, context):
    """Process a change channel request."""
    entity = directive.entity
    channel_payload = directive.payload["channel"]
    metadata_payload = directive.payload["channelMetadata"]

    # Pick the first identifier Alexa supplied, in priority order,
    # defaulting to channel "0" reported as a "number".
    channel = "0"
    payload_name = "number"
    for source, key, name in (
        (channel_payload, "number", "number"),
        (channel_payload, "callSign", "callSign"),
        (channel_payload, "affiliateCallSign", "affiliateCallSign"),
        (channel_payload, "uri", "uri"),
        (metadata_payload, "name", "callSign"),
    ):
        if key in source:
            channel = source[key]
            payload_name = name
            break

    service_data = {
        ATTR_ENTITY_ID: entity.entity_id,
        media_player.const.ATTR_MEDIA_CONTENT_ID: channel,
        media_player.const.ATTR_MEDIA_CONTENT_TYPE: media_player.const.MEDIA_TYPE_CHANNEL,
    }
    await hass.services.async_call(
        entity.domain,
        media_player.const.SERVICE_PLAY_MEDIA,
        service_data,
        blocking=False,
        context=context,
    )

    response = directive.response()
    response.add_context_property(
        {
            "namespace": "Alexa.ChannelController",
            "name": "channel",
            "value": {payload_name: channel},
        }
    )
    return response
@HANDLERS.register(("Alexa.ChannelController", "SkipChannels"))
async def async_api_skipchannel(hass, config, directive, context):
    """Process a skipchannel request."""
    entity = directive.entity
    count = int(directive.payload["channelCount"])
    service_data = {ATTR_ENTITY_ID: entity.entity_id}

    # Negative counts step backwards; fire one track-change call per step.
    service_media = SERVICE_MEDIA_PREVIOUS_TRACK if count < 0 else SERVICE_MEDIA_NEXT_TRACK
    for _ in range(abs(count)):
        await hass.services.async_call(
            entity.domain, service_media, service_data, blocking=False, context=context
        )

    response = directive.response()
    response.add_context_property(
        {
            "namespace": "Alexa.ChannelController",
            "name": "channel",
            "value": {"number": ""},
        }
    )
    return response
@HANDLERS.register(("Alexa.SeekController", "AdjustSeekPosition"))
async def async_api_seek(hass, config, directive, context):
    """Process a seek request.

    Adjusts the media position by the requested delta, clamped to
    [0, media duration], and reports the new position in milliseconds.

    Raises:
        AlexaVideoActionNotPermittedForContentError: if the entity does not
            expose a current media position.
    """
    entity = directive.entity
    position_delta = int(directive.payload["deltaPositionMilliseconds"])
    current_position = entity.attributes.get(media_player.ATTR_MEDIA_POSITION)
    # Bug fix: 0 (start of media) is a valid position — only a missing
    # attribute (None) means the position is unknown.
    if current_position is None:
        msg = f"{entity} did not return the current media position."
        raise AlexaVideoActionNotPermittedForContentError(msg)

    # Alexa sends the delta in milliseconds; HA positions are in seconds.
    seek_position = int(current_position) + int(position_delta / 1000)
    if seek_position < 0:
        seek_position = 0
    media_duration = entity.attributes.get(media_player.ATTR_MEDIA_DURATION)
    if media_duration and 0 < int(media_duration) < seek_position:
        seek_position = media_duration

    data = {
        ATTR_ENTITY_ID: entity.entity_id,
        media_player.ATTR_MEDIA_SEEK_POSITION: seek_position,
    }
    await hass.services.async_call(
        media_player.DOMAIN,
        media_player.SERVICE_MEDIA_SEEK,
        data,
        blocking=False,
        context=context,
    )

    # convert seconds to milliseconds for StateReport.
    seek_position = int(seek_position * 1000)
    payload = {"properties": [{"name": "positionMilliseconds", "value": seek_position}]}
    return directive.response(
        name="StateReport", namespace="Alexa.SeekController", payload=payload
    )
@HANDLERS.register(("Alexa.EqualizerController", "SetMode"))
async def async_api_set_eq_mode(hass, config, directive, context):
    """Process a SetMode request for EqualizerController."""
    entity = directive.entity
    mode = directive.payload["mode"]
    sound_mode_list = entity.attributes.get(media_player.const.ATTR_SOUND_MODE_LIST)

    # Alexa mode names are matched case-insensitively against the entity's
    # sound mode list; reject anything the entity does not offer.
    if not sound_mode_list or mode.lower() not in sound_mode_list:
        msg = f"failed to map sound mode {mode} to a mode on {entity.entity_id}"
        raise AlexaInvalidValueError(msg)

    service_data = {
        ATTR_ENTITY_ID: entity.entity_id,
        media_player.const.ATTR_SOUND_MODE: mode.lower(),
    }
    await hass.services.async_call(
        entity.domain,
        media_player.SERVICE_SELECT_SOUND_MODE,
        service_data,
        blocking=False,
        context=context,
    )
    return directive.response()
@HANDLERS.register(("Alexa.EqualizerController", "AdjustBands"))
@HANDLERS.register(("Alexa.EqualizerController", "ResetBands"))
@HANDLERS.register(("Alexa.EqualizerController", "SetBands"))
async def async_api_bands_directive(hass, config, directive, context):
    """Handle an AdjustBands, ResetBands, SetBands request.

    Only mode directives are currently supported for the EqualizerController.
    """
    # Currently bands directives are not supported.
    raise AlexaInvalidDirectiveError("Entity does not support directive")
@HANDLERS.register(("Alexa.TimeHoldController", "Hold"))
async def async_api_hold(hass, config, directive, context):
    """Process a TimeHoldController Hold request."""
    entity = directive.entity

    # Map each supported domain to its pause-style service.
    pause_services = {
        timer.DOMAIN: timer.SERVICE_PAUSE,
        vacuum.DOMAIN: vacuum.SERVICE_START_PAUSE,
    }
    service = pause_services.get(entity.domain)
    if service is None:
        raise AlexaInvalidDirectiveError("Entity does not support directive")

    await hass.services.async_call(
        entity.domain,
        service,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )
    return directive.response()
@HANDLERS.register(("Alexa.TimeHoldController", "Resume"))
async def async_api_resume(hass, config, directive, context):
    """Process a TimeHoldController Resume request."""
    entity = directive.entity

    # Map each supported domain to its resume-style service. Vacuums use the
    # same start/pause toggle service for both hold and resume.
    resume_services = {
        timer.DOMAIN: timer.SERVICE_START,
        vacuum.DOMAIN: vacuum.SERVICE_START_PAUSE,
    }
    service = resume_services.get(entity.domain)
    if service is None:
        raise AlexaInvalidDirectiveError("Entity does not support directive")

    await hass.services.async_call(
        entity.domain,
        service,
        {ATTR_ENTITY_ID: entity.entity_id},
        blocking=False,
        context=context,
    )
    return directive.response()
@HANDLERS.register(("Alexa.CameraStreamController", "InitializeCameraStreams"))
async def async_api_initialize_camera_stream(hass, config, directive, context):
    """Process a InitializeCameraStreams request."""
    entity = directive.entity
    # Request an HLS stream endpoint for the camera entity.
    stream_source = await camera.async_request_stream(hass, entity.entity_id, fmt="hls")
    camera_image = hass.states.get(entity.entity_id).attributes[ATTR_ENTITY_PICTURE]
    try:
        # Alexa requires a public HTTPS URL on the standard port.
        external_url = network.get_url(
            hass,
            allow_internal=False,
            allow_ip=False,
            require_ssl=True,
            require_standard_port=True,
        )
    except network.NoURLAvailableError as err:
        raise AlexaInvalidValueError(
            "Failed to find suitable URL to serve to Alexa"
        ) from err
    # Stream descriptor returned to Alexa; resolution/codecs are fixed values
    # reported for the HLS stream.
    payload = {
        "cameraStreams": [
            {
                "uri": f"{external_url}{stream_source}",
                "protocol": "HLS",
                "resolution": {"width": 1280, "height": 720},
                "authorizationType": "NONE",
                "videoCodec": "H264",
                "audioCodec": "AAC",
            }
        ],
        "imageUri": f"{external_url}{camera_image}",
    }
    return directive.response(
        name="Response", namespace="Alexa.CameraStreamController", payload=payload
    )
|
|
#!/usr/bin/env python
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
import sys
import tempfile
import urllib2
from common_includes import *
# Additional config keys on top of those provided by common_includes.
TRUNKBRANCH = "TRUNKBRANCH"
CHROMIUM = "CHROMIUM"
DEPS_FILE = "DEPS_FILE"

# Default configuration: branch names, temp-file locations and the files
# this script edits in the V8 and Chromium checkouts.
CONFIG = {
    BRANCHNAME: "prepare-push",
    TRUNKBRANCH: "trunk-push",
    PERSISTFILE_BASENAME: "/tmp/v8-push-to-trunk-tempfile",
    TEMP_BRANCH: "prepare-push-temporary-branch-created-by-script",
    DOT_GIT_LOCATION: ".git",
    VERSION_FILE: "src/version.cc",
    CHANGELOG_FILE: "ChangeLog",
    CHANGELOG_ENTRY_FILE: "/tmp/v8-push-to-trunk-tempfile-changelog-entry",
    PATCH_FILE: "/tmp/v8-push-to-trunk-tempfile-patch-file",
    COMMITMSG_FILE: "/tmp/v8-push-to-trunk-tempfile-commitmsg",
    DEPS_FILE: "DEPS",
}
class PushToTrunkOptions(CommonOptions):
    """Maps the raw optparse values onto the flags the steps consume."""

    def __init__(self, options):
        super(PushToTrunkOptions, self).__init__(options, options.m)
        # Forced mode (-f) skips the editor and the wait for LGTM.
        self.requires_editor = not options.f
        self.wait_for_lgtm = not options.f
        # TBR the prepare-push commit unless running in manual mode (-m).
        self.tbr_commit = not options.m
        self.l = options.l  # manually specified last-push commit
        self.r = options.r  # reviewer account
        self.c = options.c  # path to the Chromium checkout
class Preparation(Step):
    MESSAGE = "Preparation."

    def RunStep(self):
        # Sanity-check the environment, then make sure no stale trunk-push
        # branch is left over from a previous run.
        self.InitialEnvironmentChecks()
        self.CommonPrepare()
        self.PrepareBranch()
        self.DeleteBranch(self.Config(TRUNKBRANCH))
class FreshBranch(Step):
    MESSAGE = "Create a fresh branch."

    def RunStep(self):
        # Branch off the current bleeding_edge head to prepare the push on.
        args = "checkout -b %s svn/bleeding_edge" % self.Config(BRANCHNAME)
        if self.Git(args) is None:
            self.Die("Creating branch %s failed." % self.Config(BRANCHNAME))
class DetectLastPush(Step):
    MESSAGE = "Detect commit ID of last push to trunk."

    def RunStep(self):
        # Start from the explicit -l commit, or the last commit that touched
        # the ChangeLog, then let the user walk backwards until confirmed.
        last_push = (self._options.l or
                     self.Git("log -1 --format=%H ChangeLog").strip())
        while True:
            # Print assumed commit, circumventing git's pager.
            print self.Git("log -1 %s" % last_push)
            if self.Confirm("Is the commit printed above the last push to trunk?"):
                break
            # Not confirmed: step back to the previous ChangeLog commit.
            args = "log -1 --format=%H %s^ ChangeLog" % last_push
            last_push = self.Git(args).strip()
        self.Persist("last_push", last_push)
        self._state["last_push"] = last_push
class PrepareChangeLog(Step):
    MESSAGE = "Prepare raw ChangeLog entry."

    def Reload(self, body):
        """Attempts to reload the commit message from rietveld in order to allow
        late changes to the LOG flag. Note: This is brittle to future changes of
        the web page name or structure.
        """
        match = re.search(r"^Review URL: https://codereview\.chromium\.org/(\d+)$",
                          body, flags=re.M)
        if match:
            cl_url = "https://codereview.chromium.org/%s/description" % match.group(1)
            try:
                # Fetch from Rietveld but only retry once with one second delay since
                # there might be many revisions.
                body = self.ReadURL(cl_url, wait_plan=[1])
            except urllib2.URLError:
                pass
        return body

    def RunStep(self):
        self.RestoreIfUnset("last_push")
        # These version numbers are used again later for the trunk commit.
        self.ReadAndPersistVersion()
        date = self.GetDate()
        self.Persist("date", date)
        # Header line of the new ChangeLog entry.
        output = "%s: Version %s.%s.%s\n\n" % (date,
                                               self._state["major"],
                                               self._state["minor"],
                                               self._state["build"])
        TextToFile(output, self.Config(CHANGELOG_ENTRY_FILE))
        # All commits since the last push.
        args = "log %s..HEAD --format=%%H" % self._state["last_push"]
        commits = self.Git(args).strip()
        # Cache raw commit messages.
        commit_messages = [
            [
                self.Git("log -1 %s --format=\"%%s\"" % commit),
                self.Reload(self.Git("log -1 %s --format=\"%%B\"" % commit)),
                self.Git("log -1 %s --format=\"%%an\"" % commit),
            ] for commit in commits.splitlines()
        ]
        # Auto-format commit messages.
        body = MakeChangeLogBody(commit_messages, auto_format=True)
        AppendToFile(body, self.Config(CHANGELOG_ENTRY_FILE))
        msg = (" Performance and stability improvements on all platforms."
               "\n#\n# The change log above is auto-generated. Please review if "
               "all relevant\n# commit messages from the list below are included."
               "\n# All lines starting with # will be stripped.\n#\n")
        AppendToFile(msg, self.Config(CHANGELOG_ENTRY_FILE))
        # Include unformatted commit messages as a reference in a comment.
        comment_body = MakeComment(MakeChangeLogBody(commit_messages))
        AppendToFile(comment_body, self.Config(CHANGELOG_ENTRY_FILE))
class EditChangeLog(Step):
    MESSAGE = "Edit ChangeLog entry."

    def RunStep(self):
        print ("Please press <Return> to have your EDITOR open the ChangeLog "
               "entry, then edit its contents to your liking. When you're done, "
               "save the file and exit your EDITOR. ")
        self.ReadLine(default="")
        self.Editor(self.Config(CHANGELOG_ENTRY_FILE))
        handle, new_changelog = tempfile.mkstemp()
        os.close(handle)
        # Strip comments and reformat with correct indentation.
        changelog_entry = FileToText(self.Config(CHANGELOG_ENTRY_FILE)).rstrip()
        changelog_entry = StripComments(changelog_entry)
        changelog_entry = "\n".join(map(Fill80, changelog_entry.splitlines()))
        changelog_entry = changelog_entry.lstrip()
        if changelog_entry == "":
            self.Die("Empty ChangeLog entry.")
        # Prepend the new entry to the existing ChangeLog via a temp file.
        with open(new_changelog, "w") as f:
            f.write(changelog_entry)
            f.write("\n\n\n")  # Explicitly insert two empty lines.
        AppendToFile(FileToText(self.Config(CHANGELOG_FILE)), new_changelog)
        TextToFile(FileToText(new_changelog), self.Config(CHANGELOG_FILE))
        os.remove(new_changelog)
class IncrementVersion(Step):
    MESSAGE = "Increment version number."

    def RunStep(self):
        self.RestoreIfUnset("build")
        new_build = str(int(self._state["build"]) + 1)
        if self.Confirm(("Automatically increment BUILD_NUMBER? (Saying 'n' will "
                         "fire up your EDITOR on %s so you can make arbitrary "
                         "changes. When you're done, save the file and exit your "
                         "EDITOR.)" % self.Config(VERSION_FILE))):
            # Patch the BUILD_NUMBER define in place.
            text = FileToText(self.Config(VERSION_FILE))
            text = MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
                        r"\g<space>%s" % new_build,
                        text)
            TextToFile(text, self.Config(VERSION_FILE))
        else:
            # Let the user edit the version file manually instead.
            self.Editor(self.Config(VERSION_FILE))
        # Remember the new version numbers for later steps.
        self.ReadAndPersistVersion("new_")
class CommitLocal(Step):
    MESSAGE = "Commit to local branch."

    def RunStep(self):
        self.RestoreVersionIfUnset("new_")
        prep_commit_msg = ("Prepare push to trunk. "
                           "Now working on version %s.%s.%s." % (self._state["new_major"],
                                                                 self._state["new_minor"],
                                                                 self._state["new_build"]))
        self.Persist("prep_commit_msg", prep_commit_msg)
        # Include optional TBR only in the git command. The persisted commit
        # message is used for finding the commit again later.
        review = "\n\nTBR=%s" % self._options.r if self._options.tbr_commit else ""
        if self.Git("commit -a -m \"%s%s\"" % (prep_commit_msg, review)) is None:
            self.Die("'git commit -a' failed.")
class CommitRepository(Step):
    MESSAGE = "Commit to the repository."

    def RunStep(self):
        self.WaitForLGTM()
        # Re-read the ChangeLog entry (to pick up possible changes).
        # FIXME(machenbach): This was hanging once with a broken pipe.
        TextToFile(GetLastChangeLogEntries(self.Config(CHANGELOG_FILE)),
                   self.Config(CHANGELOG_ENTRY_FILE))
        if self.Git("cl dcommit -f", "PRESUBMIT_TREE_CHECK=\"skip\"") is None:
            self.Die("'git cl dcommit' failed, please try again.")
class StragglerCommits(Step):
    MESSAGE = ("Fetch straggler commits that sneaked in since this script was "
               "started.")

    def RunStep(self):
        if self.Git("svn fetch") is None:
            self.Die("'git svn fetch' failed.")
        self.Git("checkout svn/bleeding_edge")
        self.RestoreIfUnset("prep_commit_msg")
        # Re-locate the prepare-push commit by its persisted commit message.
        args = "log -1 --format=%%H --grep=\"%s\"" % self._state["prep_commit_msg"]
        prepare_commit_hash = self.Git(args).strip()
        self.Persist("prepare_commit_hash", prepare_commit_hash)
class SquashCommits(Step):
    MESSAGE = "Squash commits into one."

    def RunStep(self):
        # Instead of relying on "git rebase -i", we'll just create a diff, because
        # that's easier to automate.
        self.RestoreIfUnset("prepare_commit_hash")
        args = "diff svn/trunk %s" % self._state["prepare_commit_hash"]
        TextToFile(self.Git(args), self.Config(PATCH_FILE))
        # Convert the ChangeLog entry to commit message format.
        self.RestoreIfUnset("date")
        text = FileToText(self.Config(CHANGELOG_ENTRY_FILE))
        # Remove date and trailing white space.
        text = re.sub(r"^%s: " % self._state["date"], "", text.rstrip())
        # Remove indentation and merge paragraphs into single long lines, keeping
        # empty lines between them.
        def SplitMapJoin(split_text, fun, join_text):
            return lambda text: join_text.join(map(fun, text.split(split_text)))
        strip = lambda line: line.strip()
        text = SplitMapJoin("\n\n", SplitMapJoin("\n", strip, " "), "\n\n")(text)
        if not text:
            self.Die("Commit message editing failed.")
        TextToFile(text, self.Config(COMMITMSG_FILE))
        os.remove(self.Config(CHANGELOG_ENTRY_FILE))
class NewBranch(Step):
    MESSAGE = "Create a new branch from trunk."

    def RunStep(self):
        # The squashed patch will be applied onto this trunk-based branch.
        if self.Git("checkout -b %s svn/trunk" % self.Config(TRUNKBRANCH)) is None:
            self.Die("Checking out a new branch '%s' failed." %
                     self.Config(TRUNKBRANCH))
class ApplyChanges(Step):
    MESSAGE = "Apply squashed changes."

    def RunStep(self):
        self.ApplyPatch(self.Config(PATCH_FILE))
        # Remove the patch file and any leftover files sharing its prefix.
        Command("rm", "-f %s*" % self.Config(PATCH_FILE))
class SetVersion(Step):
    MESSAGE = "Set correct version for trunk."

    def RunStep(self):
        self.RestoreVersionIfUnset()
        output = ""
        # Rewrite version.cc line by line: trunk releases carry the prepared
        # major/minor/build numbers, patch level 0, and are not candidates.
        for line in FileToText(self.Config(VERSION_FILE)).splitlines():
            if line.startswith("#define MAJOR_VERSION"):
                line = re.sub("\d+$", self._state["major"], line)
            elif line.startswith("#define MINOR_VERSION"):
                line = re.sub("\d+$", self._state["minor"], line)
            elif line.startswith("#define BUILD_NUMBER"):
                line = re.sub("\d+$", self._state["build"], line)
            elif line.startswith("#define PATCH_LEVEL"):
                line = re.sub("\d+$", "0", line)
            elif line.startswith("#define IS_CANDIDATE_VERSION"):
                line = re.sub("\d+$", "0", line)
            output += "%s\n" % line
        TextToFile(output, self.Config(VERSION_FILE))
class CommitTrunk(Step):
    MESSAGE = "Commit to local trunk branch."

    def RunStep(self):
        self.Git("add \"%s\"" % self.Config(VERSION_FILE))
        # Use the commit message prepared by SquashCommits.
        if self.Git("commit -F \"%s\"" % self.Config(COMMITMSG_FILE)) is None:
            self.Die("'git commit' failed.")
        Command("rm", "-f %s*" % self.Config(COMMITMSG_FILE))
class SanityCheck(Step):
    MESSAGE = "Sanity check."

    def RunStep(self):
        # Last interactive gate before anything is pushed to SVN.
        if not self.Confirm("Please check if your local checkout is sane: Inspect "
                            "%s, compile, run tests. Do you want to commit this new trunk "
                            "revision to the repository?" % self.Config(VERSION_FILE)):
            self.Die("Execution canceled.")
class CommitSVN(Step):
MESSAGE = "Commit to SVN."
def RunStep(self):
result = self.Git("svn dcommit 2>&1")
if not result:
self.Die("'git svn dcommit' failed.")
result = filter(lambda x: re.search(r"^Committed r[0-9]+", x),
result.splitlines())
if len(result) > 0:
trunk_revision = re.sub(r"^Committed r([0-9]+)", r"\1", result[0])
# Sometimes grepping for the revision fails. No idea why. If you figure
# out why it is flaky, please do fix it properly.
if not trunk_revision:
print("Sorry, grepping for the SVN revision failed. Please look for it "
"in the last command's output above and provide it manually (just "
"the number, without the leading \"r\").")
self.DieNoManualMode("Can't prompt in forced mode.")
while not trunk_revision:
print "> ",
trunk_revision = self.ReadLine()
self.Persist("trunk_revision", trunk_revision)
class TagRevision(Step):
    MESSAGE = "Tag the new revision."

    def RunStep(self):
        self.RestoreVersionIfUnset()
        ver = "%s.%s.%s" % (self._state["major"],
                            self._state["minor"],
                            self._state["build"])
        if self.Git("svn tag %s -m \"Tagging version %s\"" % (ver, ver)) is None:
            self.Die("'git svn tag' failed.")
class CheckChromium(Step):
    MESSAGE = "Ask for chromium checkout."

    def Run(self):
        # NOTE(review): overrides Run (not RunStep) — presumably to bypass the
        # usual step bookkeeping; confirm against the Step base class.
        chrome_path = self._options.c
        if not chrome_path:
            # DieNoManualMode only dies in forced mode; in manual mode fall
            # through to prompting the user interactively.
            self.DieNoManualMode("Please specify the path to a Chromium checkout in "
                                 "forced mode.")
            print ("Do you have a \"NewGit\" Chromium checkout and want "
                   "this script to automate creation of the roll CL? If yes, enter the "
                   "path to (and including) the \"src\" directory here, otherwise just "
                   "press <Return>: "),
            chrome_path = self.ReadLine()
        self.Persist("chrome_path", chrome_path)
class SwitchChromium(Step):
    MESSAGE = "Switch to Chromium checkout."
    REQUIRES = "chrome_path"

    def RunStep(self):
        # Remember where the V8 checkout lives so SwitchV8 can return to it.
        v8_path = os.getcwd()
        self.Persist("v8_path", v8_path)
        os.chdir(self._state["chrome_path"])
        self.InitialEnvironmentChecks()
        # Check for a clean workdir.
        if self.Git("status -s -uno").strip() != "":
            self.Die("Workspace is not clean. Please commit or undo your changes.")
        # Assert that the DEPS file is there.
        if not os.path.exists(self.Config(DEPS_FILE)):
            self.Die("DEPS file not present.")
class UpdateChromiumCheckout(Step):
    MESSAGE = "Update the checkout and create a new branch."
    REQUIRES = "chrome_path"

    def RunStep(self):
        os.chdir(self._state["chrome_path"])
        if self.Git("checkout master") is None:
            self.Die("'git checkout master' failed.")
        if self.Git("pull") is None:
            self.Die("'git pull' failed, please try again.")
        self.RestoreIfUnset("trunk_revision")
        # Branch name encodes the rolled V8 revision.
        args = "checkout -b v8-roll-%s" % self._state["trunk_revision"]
        if self.Git(args) is None:
            self.Die("Failed to checkout a new branch.")
class UploadCL(Step):
    MESSAGE = "Create and upload CL."
    REQUIRES = "chrome_path"

    def RunStep(self):
        os.chdir(self._state["chrome_path"])
        # Patch DEPS file.
        self.RestoreIfUnset("trunk_revision")
        deps = FileToText(self.Config(DEPS_FILE))
        # Replace the v8_revision value with the newly pushed trunk revision.
        deps = re.sub("(?<=\"v8_revision\": \")([0-9]+)(?=\")",
                      self._state["trunk_revision"],
                      deps)
        TextToFile(deps, self.Config(DEPS_FILE))
        self.RestoreVersionIfUnset()
        ver = "%s.%s.%s" % (self._state["major"],
                            self._state["minor"],
                            self._state["build"])
        if self._options.r:
            print "Using account %s for review." % self._options.r
            rev = self._options.r
        else:
            # DieNoManualMode only dies in forced mode; otherwise prompt.
            print "Please enter the email address of a reviewer for the roll CL: ",
            self.DieNoManualMode("A reviewer must be specified in forced mode.")
            rev = self.ReadLine()
        args = "commit -am \"Update V8 to version %s.\n\nTBR=%s\"" % (ver, rev)
        if self.Git(args) is None:
            self.Die("'git commit' failed.")
        force_flag = " -f" if self._options.force_upload else ""
        if self.Git("cl upload --send-mail%s" % force_flag, pipe=False) is None:
            self.Die("'git cl upload' failed, please try again.")
        print "CL uploaded."
class SwitchV8(Step):
    MESSAGE = "Returning to V8 checkout."
    REQUIRES = "chrome_path"

    def RunStep(self):
        # Return to the V8 checkout remembered by SwitchChromium.
        self.RestoreIfUnset("v8_path")
        os.chdir(self._state["v8_path"])
class CleanUp(Step):
    MESSAGE = "Done!"

    def RunStep(self):
        self.RestoreVersionIfUnset()
        ver = "%s.%s.%s" % (self._state["major"],
                            self._state["minor"],
                            self._state["build"])
        self.RestoreIfUnset("trunk_revision")
        self.RestoreIfUnset("chrome_path")
        # The closing message depends on whether a Chromium roll was made.
        if self._state["chrome_path"]:
            print("Congratulations, you have successfully created the trunk "
                  "revision %s and rolled it into Chromium. Please don't forget to "
                  "update the v8rel spreadsheet:" % ver)
        else:
            print("Congratulations, you have successfully created the trunk "
                  "revision %s. Please don't forget to roll this new version into "
                  "Chromium, and to update the v8rel spreadsheet:" % ver)
        print "%s\ttrunk\t%s" % (ver, self._state["trunk_revision"])
        self.CommonCleanup()
        # Delete the trunk-push branch unless it is currently checked out.
        if self.Config(TRUNKBRANCH) != self._state["current_branch"]:
            self.Git("branch -D %s" % self.Config(TRUNKBRANCH))
def RunPushToTrunk(config,
                   options,
                   side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
  """Run the complete push-to-trunk pipeline.

  Args:
    config: Dictionary of script configuration values (file names, paths).
    options: Parsed command line options (see BuildOptions/ProcessOptions).
    side_effect_handler: Abstraction over real side effects (git, I/O) so
        tests can substitute a fake.
  """
  # The list order is the execution order of the steps.
  step_classes = [
    Preparation,
    FreshBranch,
    DetectLastPush,
    PrepareChangeLog,
    EditChangeLog,
    IncrementVersion,
    CommitLocal,
    UploadStep,
    CommitRepository,
    StragglerCommits,
    SquashCommits,
    NewBranch,
    ApplyChanges,
    SetVersion,
    CommitTrunk,
    SanityCheck,
    CommitSVN,
    TagRevision,
    CheckChromium,
    SwitchChromium,
    UpdateChromiumCheckout,
    UploadCL,
    SwitchV8,
    CleanUp,
  ]
  RunScript(step_classes, config, options, side_effect_handler)
def BuildOptions():
  """Construct the command line option parser for the push-to-trunk script.

  Returns:
    An optparse.OptionParser with all supported flags registered.
  """
  parser = optparse.OptionParser()
  add = parser.add_option
  add("-c", "--chromium", dest="c",
      help=("Specify the path to your Chromium src/ "
            "directory to automate the V8 roll."))
  add("-f", "--force", dest="f",
      help="Don't prompt the user.",
      default=False, action="store_true")
  add("-l", "--last-push", dest="l",
      help=("Manually specify the git commit ID "
            "of the last push to trunk."))
  add("-m", "--manual", dest="m",
      help="Prompt the user at every important step.",
      default=False, action="store_true")
  add("-r", "--reviewer", dest="r",
      help=("Specify the account name to be used for reviews."))
  add("-s", "--step", dest="s",
      help="Specify the step where to start work. Default: 0.",
      default=0, type="int")
  return parser
def ProcessOptions(options):
  """Validate the combination of parsed command line options.

  Prints a diagnostic and returns False on the first invalid combination;
  returns True when the options are usable.

  Note: the parenthesized print form is used so this validator is valid and
  behaves identically under both Python 2 and Python 3 (a single
  parenthesized argument prints the same either way).
  """
  # Step numbers index into the step list; negative makes no sense.
  if options.s < 0:
    print("Bad step number %d" % options.s)
    return False
  # Without manual prompting there is nobody to ask for a reviewer.
  if not options.m and not options.r:
    print("A reviewer (-r) is required in (semi-)automatic mode.")
    return False
  # -f suppresses prompts, -m requires them; they are mutually exclusive.
  if options.f and options.m:
    print("Manual and forced mode cannot be combined.")
    return False
  # The automatic Chromium roll needs a checkout path.
  if not options.m and not options.c:
    print("A chromium checkout (-c) is required in (semi-)automatic mode.")
    return False
  return True
def Main():
  """Script entry point: parse and validate options, then run the pipeline.

  Returns:
    1 if the options are invalid (after printing usage); otherwise falls
    through and returns None, which sys.exit() treats as exit status 0.
  """
  parser = BuildOptions()
  (options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    return 1
  RunPushToTrunk(CONFIG, PushToTrunkOptions(options))
# Allow use both as an importable module and as a command line tool.
if __name__ == "__main__":
  sys.exit(Main())
|
|
import sys, unittest, struct, math, ctypes
from binascii import hexlify
from ctypes import *
from ctypes.test import xfail
def bin(s):
    """Return the uppercase hexadecimal representation of the bytes of *s*."""
    view = memoryview(s)
    return hexlify(view).upper()
# Each *simple* type that supports different byte orders has an
# __ctype_be__ attribute that specifies the same type in BIG ENDIAN
# byte order, and a __ctype_le__ attribute that is the same type in
# LITTLE ENDIAN byte order.
#
# For Structures and Unions, these types are created on demand.
class Test(unittest.TestCase):
    """Tests for the byte-order variants (__ctype_le__/__ctype_be__) of the
    ctypes simple types, and for byte-order control on Structure subclasses.
    """
    def X_test(self):
        # Manual debug helper: the name does not start with 'test', so the
        # test runner never discovers it.
        # NOTE(review): BITS and dump are not defined in this module --
        # presumably they come from a sibling bitfield test; confirm before
        # renaming this to a real test.
        print >> sys.stderr, sys.byteorder
        for i in range(32):
            bits = BITS()
            setattr(bits, "i%s" % i, 1)
            dump(bits)
    @xfail
    def test_endian_short(self):
        # The alias matching the native byte order must be the type itself,
        # and swapping twice must return the original type object.
        if sys.byteorder == "little":
            self.assertTrue(c_short.__ctype_le__ is c_short)
            self.assertTrue(c_short.__ctype_be__.__ctype_le__ is c_short)
        else:
            self.assertTrue(c_short.__ctype_be__ is c_short)
            self.assertTrue(c_short.__ctype_le__.__ctype_be__ is c_short)
        # Big-endian instances must match struct's ">h" packing...
        s = c_short.__ctype_be__(0x1234)
        self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
        self.assertEqual(bin(s), "1234")
        self.assertEqual(s.value, 0x1234)
        # ...and little-endian instances must match "<h" packing.
        s = c_short.__ctype_le__(0x1234)
        self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
        self.assertEqual(bin(s), "3412")
        self.assertEqual(s.value, 0x1234)
        # Same checks for the unsigned variant.
        s = c_ushort.__ctype_be__(0x1234)
        self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
        self.assertEqual(bin(s), "1234")
        self.assertEqual(s.value, 0x1234)
        s = c_ushort.__ctype_le__(0x1234)
        self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
        self.assertEqual(bin(s), "3412")
        self.assertEqual(s.value, 0x1234)
    @xfail
    def test_endian_int(self):
        # Same identity/round-trip checks as for c_short, for 32-bit ints.
        if sys.byteorder == "little":
            self.assertTrue(c_int.__ctype_le__ is c_int)
            self.assertTrue(c_int.__ctype_be__.__ctype_le__ is c_int)
        else:
            self.assertTrue(c_int.__ctype_be__ is c_int)
            self.assertTrue(c_int.__ctype_le__.__ctype_be__ is c_int)
        s = c_int.__ctype_be__(0x12345678)
        self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678")
        self.assertEqual(bin(s), "12345678")
        self.assertEqual(s.value, 0x12345678)
        s = c_int.__ctype_le__(0x12345678)
        self.assertEqual(bin(struct.pack("<i", 0x12345678)), "78563412")
        self.assertEqual(bin(s), "78563412")
        self.assertEqual(s.value, 0x12345678)
        s = c_uint.__ctype_be__(0x12345678)
        self.assertEqual(bin(struct.pack(">I", 0x12345678)), "12345678")
        self.assertEqual(bin(s), "12345678")
        self.assertEqual(s.value, 0x12345678)
        s = c_uint.__ctype_le__(0x12345678)
        self.assertEqual(bin(struct.pack("<I", 0x12345678)), "78563412")
        self.assertEqual(bin(s), "78563412")
        self.assertEqual(s.value, 0x12345678)
    @xfail
    def test_endian_longlong(self):
        # 64-bit variant of the byte-order checks.
        if sys.byteorder == "little":
            self.assertTrue(c_longlong.__ctype_le__ is c_longlong)
            self.assertTrue(c_longlong.__ctype_be__.__ctype_le__ is c_longlong)
        else:
            self.assertTrue(c_longlong.__ctype_be__ is c_longlong)
            self.assertTrue(c_longlong.__ctype_le__.__ctype_be__ is c_longlong)
        s = c_longlong.__ctype_be__(0x1234567890ABCDEF)
        self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
        self.assertEqual(bin(s), "1234567890ABCDEF")
        self.assertEqual(s.value, 0x1234567890ABCDEF)
        s = c_longlong.__ctype_le__(0x1234567890ABCDEF)
        self.assertEqual(bin(struct.pack("<q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
        self.assertEqual(bin(s), "EFCDAB9078563412")
        self.assertEqual(s.value, 0x1234567890ABCDEF)
        s = c_ulonglong.__ctype_be__(0x1234567890ABCDEF)
        self.assertEqual(bin(struct.pack(">Q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
        self.assertEqual(bin(s), "1234567890ABCDEF")
        self.assertEqual(s.value, 0x1234567890ABCDEF)
        s = c_ulonglong.__ctype_le__(0x1234567890ABCDEF)
        self.assertEqual(bin(struct.pack("<Q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
        self.assertEqual(bin(s), "EFCDAB9078563412")
        self.assertEqual(s.value, 0x1234567890ABCDEF)
    @xfail
    def test_endian_float(self):
        # Byte-order aliases for c_float; values are compared via struct
        # packing since the bit pattern is what is being tested.
        if sys.byteorder == "little":
            self.assertTrue(c_float.__ctype_le__ is c_float)
            self.assertTrue(c_float.__ctype_be__.__ctype_le__ is c_float)
        else:
            self.assertTrue(c_float.__ctype_be__ is c_float)
            self.assertTrue(c_float.__ctype_le__.__ctype_be__ is c_float)
        s = c_float(math.pi)
        self.assertEqual(bin(struct.pack("f", math.pi)), bin(s))
        # Hm, what's the precision of a float compared to a double?
        self.assertAlmostEqual(s.value, math.pi, 6)
        s = c_float.__ctype_le__(math.pi)
        self.assertAlmostEqual(s.value, math.pi, 6)
        self.assertEqual(bin(struct.pack("<f", math.pi)), bin(s))
        s = c_float.__ctype_be__(math.pi)
        self.assertAlmostEqual(s.value, math.pi, 6)
        self.assertEqual(bin(struct.pack(">f", math.pi)), bin(s))
    @xfail
    def test_endian_double(self):
        # Byte-order aliases for c_double; doubles round-trip math.pi
        # exactly, so plain equality is used here.
        if sys.byteorder == "little":
            self.assertTrue(c_double.__ctype_le__ is c_double)
            self.assertTrue(c_double.__ctype_be__.__ctype_le__ is c_double)
        else:
            self.assertTrue(c_double.__ctype_be__ is c_double)
            self.assertTrue(c_double.__ctype_le__.__ctype_be__ is c_double)
        s = c_double(math.pi)
        self.assertEqual(s.value, math.pi)
        self.assertEqual(bin(struct.pack("d", math.pi)), bin(s))
        s = c_double.__ctype_le__(math.pi)
        self.assertEqual(s.value, math.pi)
        self.assertEqual(bin(struct.pack("<d", math.pi)), bin(s))
        s = c_double.__ctype_be__(math.pi)
        self.assertEqual(s.value, math.pi)
        self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s))
    def test_endian_other(self):
        # Single-byte types have no byte order; both aliases are the type
        # itself.
        self.assertTrue(c_byte.__ctype_le__ is c_byte)
        self.assertTrue(c_byte.__ctype_be__ is c_byte)
        self.assertTrue(c_ubyte.__ctype_le__ is c_ubyte)
        self.assertTrue(c_ubyte.__ctype_be__ is c_ubyte)
        self.assertTrue(c_char.__ctype_le__ is c_char)
        self.assertTrue(c_char.__ctype_be__ is c_char)
    @xfail
    def test_struct_fields_1(self):
        # Use the non-native byte order to exercise the swapped code paths.
        if sys.byteorder == "little":
            base = BigEndianStructure
        else:
            base = LittleEndianStructure
        class T(base):
            pass
        # All of these field types are accepted in a byte-swapped structure.
        _fields_ = [("a", c_ubyte),
                    ("b", c_byte),
                    ("c", c_short),
                    ("d", c_ushort),
                    ("e", c_int),
                    ("f", c_uint),
                    ("g", c_long),
                    ("h", c_ulong),
                    ("i", c_longlong),
                    ("k", c_ulonglong),
                    ("l", c_float),
                    ("m", c_double),
                    ("n", c_char),
                    ("b1", c_byte, 3),
                    ("b2", c_byte, 3),
                    ("b3", c_byte, 2),
                    ("a", c_int * 3 * 3 * 3)]
        T._fields_ = _fields_
        # these fields do not support different byte order:
        for typ in c_wchar, c_void_p, POINTER(c_int):
            _fields_.append(("x", typ))
            # Fresh class each iteration; assigning the offending _fields_
            # must raise TypeError.
            class T(base):
                pass
            self.assertRaises(TypeError, setattr, T, "_fields_", [("x", typ)])
    @xfail
    def test_struct_struct(self):
        # nested structures with different byteorders
        # create nested structures with given byteorders and set memory to data
        for nested, data in (
            (BigEndianStructure, b'\0\0\0\1\0\0\0\2'),
            (LittleEndianStructure, b'\1\0\0\0\2\0\0\0'),
        ):
            for parent in (
                BigEndianStructure,
                LittleEndianStructure,
                Structure,
            ):
                class NestedStructure(nested):
                    _fields_ = [("x", c_uint32),
                                ("y", c_uint32)]
                class TestStructure(parent):
                    _fields_ = [("point", NestedStructure)]
                self.assertEqual(len(data), sizeof(TestStructure))
                ptr = POINTER(TestStructure)
                s = cast(data, ptr)[0]
                # Drop the cached pointer type so the next iteration's
                # same-named class doesn't collide with it.
                del ctypes._pointer_type_cache[TestStructure]
                # The nested structure's byte order (not the parent's) must
                # govern how the raw bytes are interpreted.
                self.assertEqual(s.point.x, 1)
                self.assertEqual(s.point.y, 2)
    @xfail
    def test_struct_fields_2(self):
        # standard packing in struct uses no alignment.
        # So, we have to align using pad bytes.
        #
        # Unaligned accesses will crash Python (on those platforms that
        # don't allow it, like sparc solaris).
        if sys.byteorder == "little":
            base = BigEndianStructure
            fmt = ">bxhid"
        else:
            base = LittleEndianStructure
            fmt = "<bxhid"
        class S(base):
            _fields_ = [("b", c_byte),
                        ("h", c_short),
                        ("i", c_int),
                        ("d", c_double)]
        s1 = S(0x12, 0x1234, 0x12345678, 3.14)
        s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
        self.assertEqual(bin(s1), bin(s2))
    @xfail
    def test_unaligned_nonnative_struct_fields(self):
        # _pack_ = 1 disables alignment, so explicit one-byte pad fields
        # (_1, _2) mirror the "x" pad bytes in the struct format.
        if sys.byteorder == "little":
            base = BigEndianStructure
            fmt = ">b h xi xd"
        else:
            base = LittleEndianStructure
            fmt = "<b h xi xd"
        class S(base):
            _pack_ = 1
            _fields_ = [("b", c_byte),
                        ("h", c_short),
                        ("_1", c_byte),
                        ("i", c_int),
                        ("_2", c_byte),
                        ("d", c_double)]
        s1 = S()
        s1.b = 0x12
        s1.h = 0x1234
        s1.i = 0x12345678
        s1.d = 3.14
        s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
        self.assertEqual(bin(s1), bin(s2))
    def test_unaligned_native_struct_fields(self):
        # Same layout as above but with a plain (native-order) Structure.
        if sys.byteorder == "little":
            fmt = "<b h xi xd"
        else:
            # NOTE(review): this 'base' assignment is dead code -- the class
            # below derives from Structure, not base.
            base = LittleEndianStructure
            fmt = ">b h xi xd"
        class S(Structure):
            _pack_ = 1
            _fields_ = [("b", c_byte),
                        ("h", c_short),
                        ("_1", c_byte),
                        ("i", c_int),
                        ("_2", c_byte),
                        ("d", c_double)]
        s1 = S()
        s1.b = 0x12
        s1.h = 0x1234
        s1.i = 0x12345678
        s1.d = 3.14
        s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
        self.assertEqual(bin(s1), bin(s2))
# Run the byte-swap tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
|
|
from __future__ import division
'''
NeuroLearn Analysis Tools
=========================
These tools provide the ability to quickly run
machine-learning analyses on imaging data
'''
__all__ = ['Roc']
__author__ = ["Luke Chang"]
__license__ = "MIT"
import pandas as pd
import numpy as np
from nltools.plotting import roc_plot
from scipy.stats import norm, binom_test
from sklearn.metrics import auc
from copy import deepcopy
class Roc(object):
    """ Roc Class
    The Roc class is based on Tor Wager's Matlab roc_plot.m function and
    allows a user to easily run different types of receiver operator
    characteristic curves. For example, one might be interested in single
    interval or forced choice.
    Args:
        input_values: nibabel data instance
        binary_outcome: vector of training labels
        threshold_type: ['optimal_overall', 'optimal_balanced',
                        'minimum_sdt_bias']
        **kwargs: Additional keyword arguments to pass to the prediction
                    algorithm
    """
    def __init__(self, input_values=None, binary_outcome=None,
                 threshold_type='optimal_overall', forced_choice=None, **kwargs):
        # Scores and labels must pair up one-to-one.
        if len(input_values) != len(binary_outcome):
            # NOTE(review): the implicitly concatenated message is missing a
            # space ("...binary_outcomeare different lengths.").
            raise ValueError("Data Problem: input_value and binary_outcome"
                             "are different lengths.")
        # NOTE(review): any() only rejects all-False label vectors; the
        # message about "may not be boolean" looks inaccurate -- confirm
        # the intended validation.
        if not any(binary_outcome):
            raise ValueError("Data Problem: binary_outcome may not be boolean")
        thr_type = ['optimal_overall', 'optimal_balanced', 'minimum_sdt_bias']
        if threshold_type not in thr_type:
            raise ValueError("threshold_type must be ['optimal_overall', "
                             "'optimal_balanced','minimum_sdt_bias']")
        # Deep-copy all inputs so the in-place adjustments in calculate()
        # (e.g. forced-choice demeaning) never mutate the caller's data.
        self.input_values = deepcopy(input_values)
        self.binary_outcome = deepcopy(binary_outcome)
        self.threshold_type = deepcopy(threshold_type)
        self.forced_choice = deepcopy(forced_choice)
        # Flatten DataFrame labels into a 1-D numpy array.
        if isinstance(self.binary_outcome, pd.DataFrame):
            self.binary_outcome = np.array(self.binary_outcome).flatten()
        else:
            # NOTE(review): redundant -- binary_outcome was already
            # deep-copied a few lines above.
            self.binary_outcome = deepcopy(binary_outcome)
    def calculate(self, input_values=None, binary_outcome=None,
                  criterion_values=None, threshold_type='optimal_overall',
                  forced_choice=None, balanced_acc=False):
        """ Calculate Receiver Operating Characteristic plot (ROC) for
        single-interval classification.
        Args:
            input_values: nibabel data instance
            binary_outcome: vector of training labels
            criterion_values: (optional) criterion values for calculating fpr
                              & tpr
            threshold_type: ['optimal_overall', 'optimal_balanced',
                            'minimum_sdt_bias']
            forced_choice: index indicating position for each unique subject
                           (default=None)
            balanced_acc: balanced accuracy for single-interval classification
                          (bool). THIS IS NOT COMPLETELY IMPLEMENTED BECAUSE
                          IT AFFECTS ACCURACY ESTIMATES, BUT NOT P-VALUES OR
                          THRESHOLD AT WHICH TO EVALUATE SENS/SPEC
            **kwargs: Additional keyword arguments to pass to the prediction
                      algorithm
        """
        # Optional overrides of the values captured at construction time.
        if input_values is not None:
            self.input_values = deepcopy(input_values)
        if binary_outcome is not None:
            self.binary_outcome = deepcopy(binary_outcome)
        # Create Criterion Values
        if criterion_values is not None:
            self.criterion_values = deepcopy(criterion_values)
        else:
            # Default: dense sweep across the observed score range.
            self.criterion_values = np.linspace(np.min(self.input_values.squeeze()),
                                                np.max(self.input_values.squeeze()),
                                                num=50*len(self.binary_outcome))
        if forced_choice is not None:
            self.forced_choice = deepcopy(forced_choice)
        if self.forced_choice is not None:
            # Forced choice: every subject must contribute exactly one true
            # and one false trial; scores are demeaned per subject and the
            # classification threshold is fixed at 0.
            sub_idx = np.unique(self.forced_choice)
            if len(sub_idx) != len(self.binary_outcome)/2:
                raise ValueError("Make sure that subject ids are correct for 'forced_choice'.")
            if len(set(sub_idx).union(set(np.array(self.forced_choice)[self.binary_outcome]))) != len(sub_idx):
                raise ValueError("Issue with forced_choice subject labels.")
            if len(set(sub_idx).union(set(np.array(self.forced_choice)[~self.binary_outcome]))) != len(sub_idx):
                raise ValueError("Issue with forced_choice subject labels.")
            for sub in sub_idx:
                # Subtract the subject's mean score from both of their trials.
                sub_mn = (self.input_values[(self.forced_choice == sub) & (self.binary_outcome)]+self.input_values[(self.forced_choice == sub) & (~self.binary_outcome)])[0]/2
                self.input_values[(self.forced_choice == sub) & (self.binary_outcome)] = self.input_values[(self.forced_choice == sub) & (self.binary_outcome)][0] - sub_mn
                self.input_values[(self.forced_choice == sub) & (~self.binary_outcome)] = self.input_values[(self.forced_choice == sub) & (~self.binary_outcome)][0] - sub_mn
            self.class_thr = 0
        # Calculate true positive and false positive rate
        # (true division throughout -- the module imports __future__.division)
        self.tpr = np.zeros(self.criterion_values.shape)
        self.fpr = np.zeros(self.criterion_values.shape)
        for i, x in enumerate(self.criterion_values):
            wh = self.input_values >= x
            self.tpr[i] = np.sum(wh[self.binary_outcome])/np.sum(self.binary_outcome)
            self.fpr[i] = np.sum(wh[~self.binary_outcome])/np.sum(~self.binary_outcome)
        self.n_true = np.sum(self.binary_outcome)
        self.n_false = np.sum(~self.binary_outcome)
        self.auc = auc(self.fpr, self.tpr)
        # Get criterion threshold
        if self.forced_choice is None:
            self.threshold_type = threshold_type
            if threshold_type == 'optimal_balanced':
                # Maximize the average of tpr and fpr.
                mn = (self.tpr+self.fpr)/2
                self.class_thr = self.criterion_values[np.argmax(mn)]
            elif threshold_type == 'optimal_overall':
                # Maximize the raw number of correct classifications.
                n_corr_t = self.tpr*self.n_true
                n_corr_f = (1 - self.fpr)*self.n_false
                sm = (n_corr_t + n_corr_f)
                self.class_thr = self.criterion_values[np.argmax(sm)]
            elif threshold_type == 'minimum_sdt_bias':
                # Calculate MacMillan and Creelman 2005 Response Bias (c_bias)
                # rates are clipped to (0.0001, 0.9999) to keep ppf finite.
                c_bias = (norm.ppf(np.maximum(.0001, np.minimum(0.9999, self.tpr))) + norm.ppf(np.maximum(.0001, np.minimum(0.9999, self.fpr)))) / float(2)
                self.class_thr = self.criterion_values[np.argmin(abs(c_bias))]
        # Calculate output
        self.false_positive = (self.input_values >= self.class_thr) & (~self.binary_outcome)
        self.false_negative = (self.input_values < self.class_thr) & (self.binary_outcome)
        self.misclass = (self.false_negative) | (self.false_positive)
        self.true_positive = (self.binary_outcome) & (~self.misclass)
        self.true_negative = (~self.binary_outcome) & (~self.misclass)
        self.sensitivity = np.sum(self.input_values[self.binary_outcome] >= self.class_thr)/self.n_true
        self.specificity = 1 - np.sum(self.input_values[~self.binary_outcome] >= self.class_thr)/self.n_false
        self.ppv = np.sum(self.true_positive)/(np.sum(self.true_positive) + np.sum(self.false_positive))
        if self.forced_choice is not None:
            # In forced choice each trial pair yields one decision, so keep
            # only the relevant half of each indicator vector.
            self.true_positive = self.true_positive[self.binary_outcome]
            self.true_negative = self.true_negative[~self.binary_outcome]
            self.false_negative = self.false_negative[self.binary_outcome]
            self.false_positive = self.false_positive[~self.binary_outcome]
            self.misclass = (self.false_positive) | (self.false_negative)
        # Calculate Accuracy
        if balanced_acc:
            self.accuracy = np.mean([self.sensitivity, self.specificity]) # See Brodersen, Ong, Stephan, Buhmann (2010)
        else:
            self.accuracy = 1 - np.mean(self.misclass)
        # Calculate p-Value using binomial test (can add hierarchical version of binomial test)
        self.n = len(self.misclass)
        self.accuracy_p = binom_test(int(np.sum(~self.misclass)), self.n, p=.5)
        self.accuracy_se = np.sqrt(np.mean(~self.misclass) * (np.mean(~self.misclass)) / self.n)
    def plot(self, plot_method='gaussian', balanced_acc=False, **kwargs):
        """ Create ROC Plot
        Create a specific kind of ROC curve plot, based on input values
        along a continuous distribution and a binary outcome variable (logical)
        Args:
            plot_method: type of plot ['gaussian','observed']
            binary_outcome: vector of training labels
            **kwargs: Additional keyword arguments to pass to the prediction
                      algorithm
        Returns:
            fig
        """
        self.calculate(balanced_acc=balanced_acc) # Calculate ROC parameters
        if plot_method == 'gaussian':
            if self.forced_choice is not None:
                # Gaussian (signal-detection) curve from per-subject paired
                # score differences.
                sub_idx = np.unique(self.forced_choice)
                diff_scores = []
                for sub in sub_idx:
                    diff_scores.append(self.input_values[(self.forced_choice == sub) & (self.binary_outcome)][0] - self.input_values[(self.forced_choice == sub) & (~self.binary_outcome)][0])
                diff_scores = np.array(diff_scores)
                mn_diff = np.mean(diff_scores)
                d = mn_diff / np.std(diff_scores)
                pooled_sd = np.std(diff_scores) / np.sqrt(2)
                d_a_model = mn_diff / pooled_sd
                # Expected accuracy under the fitted Gaussian model; in the
                # forced-choice case sensitivity and specificity coincide.
                expected_acc = 1 - norm.cdf(0, d, 1)
                self.sensitivity = expected_acc
                self.specificity = expected_acc
                self.ppv = self.sensitivity / (self.sensitivity + 1 - self.specificity)
                self.auc = norm.cdf(d_a_model / np.sqrt(2))
                x = np.arange(-3, 3, .1)
                self.tpr_smooth = 1 - norm.cdf(x, d, 1)
                self.fpr_smooth = 1 - norm.cdf(x, -d, 1)
            else:
                # Fit equal-variance Gaussians to the true and false score
                # distributions and draw the smooth ROC they imply.
                mn_true = np.mean(self.input_values[self.binary_outcome])
                mn_false = np.mean(self.input_values[~self.binary_outcome])
                var_true = np.var(self.input_values[self.binary_outcome])
                var_false = np.var(self.input_values[~self.binary_outcome])
                # NOTE(review): only var_true enters this pooled estimate;
                # var_false is computed but unused -- confirm intent.
                pooled_sd = np.sqrt((var_true*(self.n_true - 1))/(self.n_true + self.n_false - 2))
                d = (mn_true - mn_false)/pooled_sd
                z_true = mn_true/pooled_sd
                z_false = mn_false/pooled_sd
                x = np.arange(z_false-3, z_true+3, .1)
                self.tpr_smooth = 1 - (norm.cdf(x, z_true, 1))
                self.fpr_smooth = 1 - (norm.cdf(x, z_false, 1))
                self.aucn = auc(self.fpr_smooth, self.tpr_smooth)
            fig = roc_plot(self.fpr_smooth, self.tpr_smooth)
        elif plot_method == 'observed':
            # Empirical (step) ROC from the criterion sweep in calculate().
            fig = roc_plot(self.fpr, self.tpr)
        else:
            raise ValueError("plot_method must be 'gaussian' or 'observed'")
        return fig
    def summary(self):
        """ Display a formatted summary of ROC analysis. """
        print("------------------------")
        print(".:ROC Analysis Summary:.")
        print("------------------------")
        print("{:20s}".format("Accuracy:") + "{:.2f}".format(self.accuracy))
        print("{:20s}".format("Accuracy SE:") + "{:.2f}".format(self.accuracy_se))
        print("{:20s}".format("Accuracy p-value:") + "{:.2f}".format(self.accuracy_p))
        print("{:20s}".format("Sensitivity:") + "{:.2f}".format(self.sensitivity))
        print("{:20s}".format("Specificity:") + "{:.2f}".format(self.specificity))
        print("{:20s}".format("AUC:") + "{:.2f}".format(self.auc))
        print("{:20s}".format("PPV:") + "{:.2f}".format(self.ppv))
        print("------------------------")
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from builtins import str
from builtins import map
from builtins import object
import re
from unittest2 import TestSuite
from .testcase import CourgetteTestCase
class _TestMaker(object):
    """ Make tests

    Base class that turns registered template method names into concrete
    test methods for a given SDK object, optionally once per attribute.
    """
    # Server-managed attributes that are never exercised by generated tests.
    IGNORED_ATTRIBUTES = ["id", "parent_id", "parent_type", "creation_date", "owner", "last_updated_date", "last_updated_by", "external_id"]
    def __init__(self, helper):
        """ Initializes a TestMaker
        """
        # function name -> conditions dict, for object-level tests
        self._object_registry = dict()
        # function name -> conditions dict, for per-attribute tests
        self._attributes_registry = dict()
        self.helper = helper
    def register_test(self, function_name, **conditions):
        """ Register test for all attributes
        """
        # First registration wins; duplicates are silently ignored.
        if function_name not in self._object_registry:
            self._object_registry[function_name] = conditions
    def register_test_for_attribute(self, function_name, **conditions):
        """ Register an attribute test for all given conditions
        """
        if function_name not in self._attributes_registry:
            self._attributes_registry[function_name] = conditions
    def does_attribute_meet_condition(self, attribute, conditions):
        """ Check if the attribute meet all the given conditions
            Args:
                attribute: the attribute information
                conditions: a dictionary of condition to match
            Returns:
                True if the attribute match all conditions. False otherwise
        """
        if conditions is None or len(conditions) == 0:
            return True
        for attribute_name, attribute_value in conditions.items():
            # Missing attributes default to False; the bool() comparison
            # lets truthy values satisfy a condition of True.
            value = getattr(attribute, attribute_name, False)
            if value != attribute_value and bool(value) != attribute_value:
                return False
        return True
    def make_tests(self, sdkobject, testcase):
        """ Make all tests that should be run for the given object in the specified testcase
            Args:
                sdkobject: the sdk object
                testcase: the test case
            Returns:
                It returns a dictionary of all tests to run
        """
        tests = dict()
        attributes = sdkobject.get_attributes()
        for attribute in attributes:
            if attribute.local_name in self.IGNORED_ATTRIBUTES:
                continue
            # One test per (attribute, registered template) pair whose
            # conditions the attribute satisfies.
            for function_name, conditions in self._attributes_registry.items():
                if self.does_attribute_meet_condition(attribute, conditions):
                    (test_name, test_func) = self._create_test(testcase=testcase, sdkobject=sdkobject, function_name=function_name, attribute=attribute)
                    tests[test_name] = test_func
        # Object-level tests are created unconditionally.
        # NOTE(review): 'infos' (the registered conditions) is unused here --
        # confirm whether object-level conditions were meant to be checked.
        for function_name, infos in self._object_registry.items():
            (test_name, test_func) = self._create_test(testcase=testcase, sdkobject=sdkobject, function_name=function_name)
            tests[test_name] = test_func
        return tests
    def _create_test(self, testcase, function_name, sdkobject, attribute=None):
        """ Create a test method for the sdkoject
            Args:
                testcase: the testcase to that should manage the method
                function_name: the name of the method in the testcase
                sdkobject: the object that should be tested
                attribute: the attribute information if necessary
            Returns:
                It returns a tuple (name, method) that represents the test method
        """
        # Resolve the template method on the testcase class.
        func = getattr(testcase, function_name)
        object_name = sdkobject.rest_name
        # Name that will be displayed
        test_name = ""
        rep = dict()
        rep["object"] = object_name
        if attribute:
            rep["attribute"] = attribute.local_name
        # Build a single regex that substitutes the placeholder words
        # "object"/"attribute" in the template name in one pass.
        rep = dict((re.escape(k), v) for k, v in rep.items())
        pattern = re.compile("|".join(list(rep.keys())))
        if function_name.startswith("_"):
            function_name = function_name[1:]
        test_name = pattern.sub(lambda m: rep[re.escape(m.group(0))], function_name)
        # Prepare and add test method to test suite
        test_func = None
        if attribute:
            # Bind the current attribute as a default argument to avoid the
            # late-binding closure pitfall inside this loop-driven factory.
            test_func = lambda self, attribute=attribute: func(self, attribute)
        else:
            test_func = lambda self: func(self)
        test_func.__name__ = str(test_name)
        return (test_name, test_func)
# CREATE TESTS
class CreateTestMaker(_TestMaker):
    """ TestCase for create objects
    """
    def __init__(self, parent, sdkobject, helper):
        """ Initializes a test case for creating objects
        """
        super(CreateTestMaker, self).__init__(helper=helper)
        self.parent = parent
        self.sdkobject = sdkobject
        # Object tests
        self.register_test("_test_create_object_with_all_valid_attributes_should_succeed")
        self.register_test("_test_create_object_without_authentication_should_fail")
        # Attribute tests
        self.register_test_for_attribute("_test_create_object_with_required_attribute_as_none_should_fail", is_required=True)
        self.register_test_for_attribute("_test_create_object_with_attribute_not_in_allowed_choices_list_should_fail", has_choices=True)
        self.register_test_for_attribute("_test_create_object_with_attribute_as_none_should_succeed", is_required=False)
    def suite(self):
        """ Inject generated tests
        """
        # Generated test functions must be attached to the class before the
        # suite is built, because unittest resolves tests by method name.
        CreateTestCase.parent = self.parent
        CreateTestCase.sdkobject = self.sdkobject
        CreateTestCase.helper = self.helper
        tests = self.make_tests(sdkobject=self.sdkobject, testcase=CreateTestCase)
        for test_name, test_func in tests.items():
            setattr(CreateTestCase, test_name, test_func)
        # One CreateTestCase instance per generated test name.
        return TestSuite(list(map(CreateTestCase, tests)))
class CreateTestCase(CourgetteTestCase):
    """Generated test case exercising object creation through the parent API.

    Class attributes parent, sdkobject and helper are injected by
    CreateTestMaker.suite() before instantiation.
    """
    def __init__(self, methodName="runTest"):
        """ Initialize
        """
        CourgetteTestCase.__init__(self, methodName=methodName)
        # Keep an untouched copy so every test starts from a clean object.
        self.pristine_sdkobject = self.sdkobject.copy()
    def setUp(self):
        """ Setting up create test
        """
        self.last_connection = None
        self.sdkobject = self.pristine_sdkobject.copy()
    def tearDown(self):
        """ Clean up environment
        """
        # Remove the object from the server if a test created it.
        if self.sdkobject and self.sdkobject.id:
            self.sdkobject.delete()
            self.sdkobject.id = None
    # Objects tests
    def _test_create_object_without_authentication_should_fail(self):
        """ Create an object without authentication """
        self.helper.set_api_key(None)
        (obj, connection) = self.parent.create_child(self.sdkobject)
        self.last_connection = connection
        # Restore the key before asserting so a failure doesn't leave the
        # session unauthenticated for subsequent tests.
        self.helper.set_api_key(self.helper.session.root_object.api_key)
        self.assertConnectionStatus(connection, 401)
    def _test_create_object_with_all_valid_attributes_should_succeed(self):
        """ Create an object with all its valid attributes should always succeed with 201 response
        """
        (obj, connection) = self.parent.create_child(self.sdkobject)
        self.last_connection = connection
        self.assertConnectionStatus(connection, 201)
        # Fixed: use assertEqual -- the assertEquals alias is deprecated
        # (and removed in Python 3.12).
        self.assertEqual(obj.to_dict(), self.sdkobject.to_dict())
    # Attributes tests
    def _test_create_object_with_required_attribute_as_none_should_fail(self, attribute):
        """ Create an object with a required attribute as None """
        setattr(self.sdkobject, attribute.local_name, None)
        (obj, connection) = self.parent.create_child(self.sdkobject)
        self.last_connection = connection
        self.assertConnectionStatus(connection, 409)
        # self.assertErrorEqual(connection.response.errors, title="Invalid input", description="This value is mandatory.", rest_name=attribute.rest_name)
    def _test_create_object_with_attribute_as_none_should_succeed(self, attribute):
        """ Create an objet with an attribute as none """
        setattr(self.sdkobject, attribute.local_name, None)
        (obj, connection) = self.parent.create_child(self.sdkobject)
        self.last_connection = connection
        self.assertConnectionStatus(connection, 201)
        # self.assertIsNone(getattr(obj, attribute.local_name), "%s should be none but was %s instead" % (attribute.local_name, getattr(obj, attribute.local_name)))
    def _test_create_object_with_attribute_not_in_allowed_choices_list_should_fail(self, attribute):
        """ Create an object with a wrong choice attribute """
        setattr(self.sdkobject, attribute.local_name, "A random value")
        (obj, connection) = self.parent.create_child(self.sdkobject)
        self.last_connection = connection
        self.assertConnectionStatus(connection, 409)
        # self.assertErrorEqual(connection.response.errors, title="Invalid input", description="Invalid input", rest_name=attribute.rest_name)
# UPDATE TESTS
class UpdateTestMaker(_TestMaker):
    """ TestCase for updating objects
    """
    def __init__(self, parent, sdkobject, helper):
        """ Initializes a test case for updating objects
        """
        super(UpdateTestMaker, self).__init__(helper=helper)
        self.parent = parent
        self.sdkobject = sdkobject
        # Object tests
        # self.register_test("_test_update_object_with_same_attributes_should_fail") # this feature is completely stupid anyway
        self.register_test("_test_update_object_without_authentication_should_fail")
        # Attribute tests
        self.register_test_for_attribute("_test_update_object_with_attribute_not_in_allowed_choices_list_should_fail", has_choices=True)
        self.register_test_for_attribute("_test_update_object_with_required_attribute_as_none_should_fail", is_required=True)
        self.register_test_for_attribute("_test_update_object_with_attribute_as_none_should_succeed", is_required=False)
        # self.register_test_for_attribute("_test_update_object_with_attribute_with_choices_as_none_should_fail", has_choices=True)
    def suite(self):
        """ Inject generated tests
        """
        # Attach the generated test methods to the class before building the
        # suite, because unittest resolves tests by method name.
        UpdateTestCase.parent = self.parent
        UpdateTestCase.sdkobject = self.sdkobject
        UpdateTestCase.helper = self.helper
        tests = self.make_tests(sdkobject=self.sdkobject, testcase=UpdateTestCase)
        for test_name, test_func in tests.items():
            setattr(UpdateTestCase, test_name, test_func)
        # One UpdateTestCase instance per generated test name.
        return TestSuite(list(map(UpdateTestCase, tests)))
class UpdateTestCase(CourgetteTestCase):
    # Generated test case exercising object updates; parent, sdkobject and
    # helper are injected by UpdateTestMaker.suite() before instantiation.
    def __init__(self, methodName="runTest"):
        """ Initialize
        """
        CourgetteTestCase.__init__(self, methodName=methodName)
        # Keep an untouched copy so every test starts from a clean object.
        self.pristine_sdkobject = self.sdkobject.copy()
    def setUp(self):
        """ Setting up create test
        """
        self.last_connection = None
        self.sdkobject = self.pristine_sdkobject.copy()
        # Update tests need an existing object to modify.
        self.parent.create_child(self.sdkobject)
    def tearDown(self):
        """ Clean up environment
        """
        self.sdkobject.delete()
    # Objects tests
    def _test_update_object_without_authentication_should_fail(self):
        """ Update an object without authentication
        """
        self.helper.set_api_key(None)
        (obj, connection) = self.sdkobject.save()
        self.last_connection = connection
        # Restore the key before asserting so a failure doesn't leave the
        # session unauthenticated for subsequent tests.
        self.helper.set_api_key(self.helper.session.root_object.api_key)
        self.assertConnectionStatus(connection, 401)
    def _test_update_object_with_same_attributes_should_fail(self):
        """ Update an object with same attributes should always fail with 409 error
        """
        # Save twice: the second save carries no attribute changes.
        (obj, connection) = self.sdkobject.save()
        (obj, connection) = self.sdkobject.save()
        self.last_connection = connection
        self.assertConnectionStatus(connection, 409)
        # self.assertErrorEqual(connection.response.errors, title="No changes to modify the entity", description="There are no attribute changes to modify the entity.")
    # Attributes tests
    def _test_update_object_with_required_attribute_as_none_should_fail(self, attribute):
        """ Update an object with a required attribute as None
        """
        setattr(self.sdkobject, attribute.local_name, None)
        (obj, connection) = self.sdkobject.save()
        self.last_connection = connection
        self.assertConnectionStatus(connection, 409)
        # self.assertErrorEqual(connection.response.errors, title="Invalid input", description="This value is mandatory.", rest_name=attribute.rest_name)
    def _test_update_object_with_attribute_with_choices_as_none_should_fail(self, attribute):
        """ Update an objet with an attribute with choices as none should fail
        """
        setattr(self.sdkobject, attribute.local_name, None)
        (obj, connection) = self.sdkobject.save()
        self.last_connection = connection
        self.assertConnectionStatus(connection, 409)
        # self.assertIsNone(getattr(obj, attribute.local_name), "%s should be none but was %s instead" % (attribute.local_name, getattr(obj, attribute.local_name)))
    def _test_update_object_with_attribute_not_in_allowed_choices_list_should_fail(self, attribute):
        """ Update an object with a wrong choice attribute
        """
        setattr(self.sdkobject, attribute.local_name, "A random value")
        (obj, connection) = self.sdkobject.save()
        self.last_connection = connection
        self.assertConnectionStatus(connection, 409)
        # self.assertErrorEqual(connection.response.errors, title="Invalid input", description="Invalid input", rest_name=attribute.rest_name)
    def _test_update_object_with_attribute_as_none_should_succeed(self, attribute):
        """ Update an objet with an attribute as none
        """
        setattr(self.sdkobject, attribute.local_name, None)
        (obj, connection) = self.sdkobject.save()
        self.last_connection = connection
        self.assertConnectionStatus(connection, 200)
        self.assertIsNone(getattr(obj, attribute.local_name), "%s should be none but was %s instead" % (attribute.local_name, getattr(obj, attribute.local_name)))
# DELETE TESTS
class DeleteTestMaker(_TestMaker):
    """Builds the generated test suite covering object deletion."""

    # Object-level tests only; deletion has no per-attribute checks.
    _OBJECT_TESTS = (
        "_test_delete_object_without_authentication_should_fail",
        "_test_delete_object_with_valid_id_should_succeed",
        "_test_delete_object_with_wrong_id_should_fail",
    )

    def __init__(self, parent, sdkobject, helper):
        """Initializes a test maker for deleting objects."""
        super(DeleteTestMaker, self).__init__(helper=helper)
        self.parent = parent
        self.sdkobject = sdkobject
        for test_name in self._OBJECT_TESTS:
            self.register_test(test_name)

    def suite(self):
        """Injects the generated tests into DeleteTestCase and returns a suite."""
        DeleteTestCase.parent = self.parent
        DeleteTestCase.sdkobject = self.sdkobject
        DeleteTestCase.helper = self.helper
        generated = self.make_tests(sdkobject=self.sdkobject, testcase=DeleteTestCase)
        for name, func in generated.items():
            setattr(DeleteTestCase, name, func)
        return TestSuite([DeleteTestCase(name) for name in generated])
class DeleteTestCase(CourgetteTestCase):
    """Generated test case exercising object deletion (DELETE) behavior.

    Class attributes ``parent``, ``sdkobject`` and ``helper`` are injected
    by ``DeleteTestMaker.suite()`` before this case is instantiated.
    """

    def __init__(self, methodName="runTest"):
        """ Initialize
        """
        CourgetteTestCase.__init__(self, methodName=methodName)
        # Keep an untouched template so every test starts from a clean copy.
        self.pristine_sdkobject = self.sdkobject.copy()

    def setUp(self):
        """ Setting up delete test: create a fresh child object on the server
        """
        self.last_connection = None
        self.sdkobject = self.pristine_sdkobject.copy()
        self.parent.create_child(self.sdkobject)

    def tearDown(self):
        """ Clean up environment
        """
        # The object may already have been deleted by the test itself.
        if self.sdkobject.id is not None:
            self.sdkobject.delete()

    # Objects tests
    def _test_delete_object_without_authentication_should_fail(self):
        """ Delete an object without authentication
        """
        self.helper.set_api_key(None)
        (obj, connection) = self.sdkobject.delete()
        self.last_connection = connection
        # Restore the API key so later tests can authenticate again.
        self.helper.set_api_key(self.helper.session.root_object.api_key)
        self.assertConnectionStatus(connection, 401)

    def _test_delete_object_with_valid_id_should_succeed(self):
        """ Delete an object with its id should always succeed with 200 response
        """
        (obj, connection) = self.sdkobject.delete()
        self.last_connection = connection
        self.assertConnectionStatus(connection, 200)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(obj.to_dict(), self.sdkobject.to_dict())
        self.sdkobject.id = None  # so it won't be deleted again in tearDown

    def _test_delete_object_with_wrong_id_should_fail(self):
        """ Delete an object with a wrong id should fail with 404 error
        """
        default_id = self.sdkobject.id
        invalid_id = "000-000-000-000-00-000"
        self.sdkobject.id = invalid_id
        (obj, connection) = self.sdkobject.delete()
        self.last_connection = connection
        # Restore the real id so tearDown can still delete the object.
        self.sdkobject.id = default_id
        self.assertConnectionStatus(connection, 404)
        # self.assertErrorEqual(connection.response.errors, title="%s not found" % self.sdkobject.rest_name, description="Cannot find %s with ID %s" % (self.sdkobject.rest_name, invalid_id))
# GET TESTS
class GetTestMaker(_TestMaker):
    """Builds the generated test suite covering single-object retrieval."""

    # Object-level tests only; retrieval has no per-attribute checks.
    _OBJECT_TESTS = (
        "_test_get_object_without_authentication_should_fail",
        "_test_get_object_with_valid_id_should_succeed",
        "_test_get_object_with_wrong_id_should_fail",
    )

    def __init__(self, parent, sdkobject, helper):
        """Initializes a test maker for fetching objects."""
        super(GetTestMaker, self).__init__(helper=helper)
        self.parent = parent
        self.sdkobject = sdkobject
        for test_name in self._OBJECT_TESTS:
            self.register_test(test_name)

    def suite(self):
        """Injects the generated tests into GetTestCase and returns a suite."""
        GetTestCase.parent = self.parent
        GetTestCase.sdkobject = self.sdkobject
        GetTestCase.helper = self.helper
        generated = self.make_tests(sdkobject=self.sdkobject, testcase=GetTestCase)
        for name, func in generated.items():
            setattr(GetTestCase, name, func)
        return TestSuite([GetTestCase(name) for name in generated])
class GetTestCase(CourgetteTestCase):
    """Generated test case exercising single-object retrieval (GET) behavior.

    Class attributes ``parent``, ``sdkobject`` and ``helper`` are injected
    by ``GetTestMaker.suite()`` before this case is instantiated.
    """

    def __init__(self, methodName="runTest"):
        """ Initialize
        """
        CourgetteTestCase.__init__(self, methodName=methodName)
        # Keep an untouched template so every test starts from a clean copy.
        self.pristine_sdkobject = self.sdkobject.copy()

    def setUp(self):
        """ Setting up get test
        """
        self.last_connection = None
        self.sdkobject = self.pristine_sdkobject.copy()
        self.parent.create_child(self.sdkobject)

    def tearDown(self):
        """ Clean up environment
        """
        self.sdkobject.delete()

    # Objects tests
    def _test_get_object_without_authentication_should_fail(self):
        """ Get an object without authentication
        """
        self.helper.set_api_key(None)
        (obj, connection) = self.sdkobject.fetch()
        self.last_connection = connection
        # Restore the API key so later tests can authenticate again.
        self.helper.set_api_key(self.helper.session.root_object.api_key)
        self.assertConnectionStatus(connection, 401)

    def _test_get_object_with_valid_id_should_succeed(self):
        """ Get an object with its id should always succeed with 200 response
        """
        (obj, connection) = self.sdkobject.fetch()
        self.last_connection = connection
        self.assertConnectionStatus(connection, 200)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(obj.to_dict(), self.sdkobject.to_dict())

    def _test_get_object_with_wrong_id_should_fail(self):
        """ Get an object with a wrong id should fail with 404 error
        """
        default_id = self.sdkobject.id
        invalid_id = "000-000-000-000-00-000"
        self.sdkobject.id = invalid_id
        (obj, connection) = self.sdkobject.fetch()
        self.last_connection = connection
        # Restore the real id so tearDown can delete the object.
        self.sdkobject.id = default_id
        self.assertConnectionStatus(connection, 404)
        # self.assertErrorEqual(connection.response.errors, title="%s not found" % self.sdkobject.rest_name, description="Cannot find %s with ID %s" % (self.sdkobject.rest_name, invalid_id))
# GETALL TESTS
class GetAllTestMaker(_TestMaker):
    """Builds the generated test suite covering fetching all children."""

    # Object-level tests only; no per-attribute checks for fetch-all.
    _OBJECT_TESTS = (
        "_test_get_all_objects_without_authentication_should_fail",
        "_test_get_all_objects_without_content_should_success",
        "_test_get_all_objects_with_content_should_success",
    )

    def __init__(self, parent, sdkobject, helper):
        """Initializes a test maker for fetching all objects."""
        super(GetAllTestMaker, self).__init__(helper=helper)
        self.parent = parent
        self.sdkobject = sdkobject
        for test_name in self._OBJECT_TESTS:
            self.register_test(test_name)

    def suite(self):
        """Injects the generated tests into GetAllTestCase and returns a suite."""
        GetAllTestCase.parent = self.parent
        GetAllTestCase.sdkobject = self.sdkobject
        GetAllTestCase.helper = self.helper
        generated = self.make_tests(sdkobject=self.sdkobject, testcase=GetAllTestCase)
        for name, func in generated.items():
            setattr(GetAllTestCase, name, func)
        return TestSuite([GetAllTestCase(name) for name in generated])
class GetAllTestCase(CourgetteTestCase):
    """Generated test case covering fetching all children of a parent.

    Class attributes ``parent``, ``sdkobject`` and ``helper`` are injected
    by ``GetAllTestMaker.suite()`` before this case is instantiated.
    """

    def __init__(self, methodName="runTest"):
        """Initialize and snapshot a pristine template object."""
        CourgetteTestCase.__init__(self, methodName=methodName)
        self.pristine_sdkobject = self.sdkobject.copy()

    def setUp(self):
        """Reset connection tracking and start from a fresh object copy."""
        self.last_connection = None
        self.sdkobject = self.pristine_sdkobject.copy()

    def tearDown(self):
        """Nothing to clean up: each test manages its own objects."""
        pass

    def _fetch_children(self):
        """Fetch all children matching the object's rest_name; return the connection used."""
        fetcher = self.parent.fetcher_for_rest_name(self.sdkobject.rest_name)
        (fetcher, parent, children) = fetcher.fetch()
        return fetcher.current_connection

    # Objects tests
    def _test_get_all_objects_without_authentication_should_fail(self):
        """ Get all object without authentication
        """
        self.helper.set_api_key(None)
        connection = self._fetch_children()
        self.last_connection = connection
        # Restore the API key so later tests can authenticate again.
        self.helper.set_api_key(self.helper.session.root_object.api_key)
        self.assertConnectionStatus(connection, 401)

    def _test_get_all_objects_without_content_should_success(self):
        """ Get all object without content should succeed with 200 response
        """
        connection = self._fetch_children()
        self.last_connection = connection
        self.assertConnectionStatus(connection, 200)

    def _test_get_all_objects_with_content_should_success(self):
        """ Get all object with content should succeed with 200 response
        """
        self.parent.create_child(self.sdkobject)
        connection = self._fetch_children()
        self.last_connection = connection
        self.sdkobject.delete()
        self.assertConnectionStatus(connection, 200)

    # Attributes tests

    # Filter, Order, Page etc.
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic training script that trains a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from tensorflow.contrib import quantize as contrib_quantize
from datasets import dataset_factory
from deployment import model_deploy
from nets import nets_factory
from preprocessing import preprocessing_factory
#############################
# Infrastructure Flags      #
#############################

tf.app.flags.DEFINE_string(
    'master', '', 'The address of the TensorFlow master to use.')

tf.app.flags.DEFINE_string(
    'train_dir', '/tmp/tfmodel/',
    'Directory where checkpoints and event logs are written to.')

tf.app.flags.DEFINE_float(
    'warmup_epochs', 0,
    'Linearly warmup learning rate from 0 to learning_rate over this '
    'many epochs.')

tf.app.flags.DEFINE_integer('num_clones', 1,
                            'Number of model clones to deploy. Note For '
                            'historical reasons loss from all clones averaged '
                            'out and learning rate decay happen per clone '
                            'epochs')

tf.app.flags.DEFINE_boolean('clone_on_cpu', False,
                            'Use CPUs to deploy clones.')

tf.app.flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.')

tf.app.flags.DEFINE_integer(
    'num_ps_tasks', 0,
    'The number of parameter servers. If the value is 0, then the parameters '
    'are handled locally by the worker.')

tf.app.flags.DEFINE_integer(
    'num_readers', 4,
    'The number of parallel readers that read data from the dataset.')

tf.app.flags.DEFINE_integer(
    'num_preprocessing_threads', 4,
    'The number of threads used to create the batches.')

tf.app.flags.DEFINE_integer(
    'log_every_n_steps', 10,
    'The frequency with which logs are print.')

tf.app.flags.DEFINE_integer(
    'save_summaries_secs', 600,
    'The frequency with which summaries are saved, in seconds.')

tf.app.flags.DEFINE_integer(
    'save_interval_secs', 600,
    'The frequency with which the model is saved, in seconds.')

tf.app.flags.DEFINE_integer(
    'task', 0, 'Task id of the replica running the training.')

######################
# Optimization Flags #
######################

tf.app.flags.DEFINE_float(
    'weight_decay', 0.00004, 'The weight decay on the model weights.')

tf.app.flags.DEFINE_string(
    'optimizer', 'rmsprop',
    'The name of the optimizer, one of "adadelta", "adagrad", "adam",'
    '"ftrl", "momentum", "sgd" or "rmsprop".')

tf.app.flags.DEFINE_float(
    'adadelta_rho', 0.95,
    'The decay rate for adadelta.')

tf.app.flags.DEFINE_float(
    'adagrad_initial_accumulator_value', 0.1,
    'Starting value for the AdaGrad accumulators.')

tf.app.flags.DEFINE_float(
    'adam_beta1', 0.9,
    'The exponential decay rate for the 1st moment estimates.')

tf.app.flags.DEFINE_float(
    'adam_beta2', 0.999,
    'The exponential decay rate for the 2nd moment estimates.')

tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')

tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,
                          'The learning rate power.')

tf.app.flags.DEFINE_float(
    'ftrl_initial_accumulator_value', 0.1,
    'Starting value for the FTRL accumulators.')

tf.app.flags.DEFINE_float(
    'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')

tf.app.flags.DEFINE_float(
    'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')

tf.app.flags.DEFINE_float(
    'momentum', 0.9,
    'The momentum for the MomentumOptimizer and RMSPropOptimizer.')

tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')

tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')

tf.app.flags.DEFINE_integer(
    'quantize_delay', -1,
    'Number of steps to start quantized training. Set to -1 would disable '
    'quantized training.')

#######################
# Learning Rate Flags #
#######################

tf.app.flags.DEFINE_string(
    'learning_rate_decay_type',
    'exponential',
    'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
    ' or "polynomial"')

tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')

tf.app.flags.DEFINE_float(
    'end_learning_rate', 0.0001,
    'The minimal end learning rate used by a polynomial decay learning rate.')

tf.app.flags.DEFINE_float(
    'label_smoothing', 0.0, 'The amount of label smoothing.')

tf.app.flags.DEFINE_float(
    'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')

tf.app.flags.DEFINE_float(
    'num_epochs_per_decay', 2.0,
    'Number of epochs after which learning rate decays. Note: this flag counts '
    'epochs per clone but aggregates per sync replicas. So 1.0 means that '
    'each clone will go over full epoch individually, but replicas will go '
    'once across all replicas.')

tf.app.flags.DEFINE_bool(
    'sync_replicas', False,
    'Whether or not to synchronize the replicas during training.')

tf.app.flags.DEFINE_integer(
    'replicas_to_aggregate', 1,
    'The Number of gradients to collect before updating params.')

tf.app.flags.DEFINE_float(
    'moving_average_decay', None,
    'The decay to use for the moving average.'
    'If left as None, then moving averages are not used.')

#######################
# Dataset Flags #
#######################

tf.app.flags.DEFINE_string(
    'dataset_name', 'imagenet', 'The name of the dataset to load.')

tf.app.flags.DEFINE_string(
    'dataset_split_name', 'train', 'The name of the train/test split.')

tf.app.flags.DEFINE_string(
    'dataset_dir', None, 'The directory where the dataset files are stored.')

tf.app.flags.DEFINE_integer(
    'labels_offset', 0,
    'An offset for the labels in the dataset. This flag is primarily used to '
    'evaluate the VGG and ResNet architectures which do not use a background '
    'class for the ImageNet dataset.')

tf.app.flags.DEFINE_string(
    'model_name', 'inception_v3', 'The name of the architecture to train.')

tf.app.flags.DEFINE_string(
    'preprocessing_name', None, 'The name of the preprocessing to use. If left '
    'as `None`, then the model_name flag is used.')

tf.app.flags.DEFINE_integer(
    'batch_size', 32, 'The number of samples in each batch.')

tf.app.flags.DEFINE_integer(
    'train_image_size', None, 'Train image size')

tf.app.flags.DEFINE_integer('max_number_of_steps', None,
                            'The maximum number of training steps.')

tf.app.flags.DEFINE_bool('use_grayscale', False,
                         'Whether to convert input images to grayscale.')

#####################
# Fine-Tuning Flags #
#####################

tf.app.flags.DEFINE_string(
    'checkpoint_path', None,
    'The path to a checkpoint from which to fine-tune.')

tf.app.flags.DEFINE_string(
    'checkpoint_exclude_scopes', None,
    'Comma-separated list of scopes of variables to exclude when restoring '
    'from a checkpoint.')

tf.app.flags.DEFINE_string(
    'trainable_scopes', None,
    'Comma-separated list of scopes to filter the set of variables to train.'
    'By default, None would train all the variables.')

tf.app.flags.DEFINE_boolean(
    'ignore_missing_vars', False,
    'When restoring a checkpoint would ignore missing variables.')

# Parsed command-line flag values for the whole script.
FLAGS = tf.app.flags.FLAGS
def _configure_learning_rate(num_samples_per_epoch, global_step):
  """Configures the learning rate.

  Args:
    num_samples_per_epoch: The number of samples in each epoch of training.
    global_step: The global_step tensor.

  Returns:
    A `Tensor` representing the learning rate.

  Raises:
    ValueError: if FLAGS.learning_rate_decay_type is not recognized.
  """
  # Note: when num_clones is > 1, this will actually have each clone to go
  # over each epoch FLAGS.num_epochs_per_decay times. This is different
  # behavior from sync replicas and is expected to produce different results.
  steps_per_epoch = num_samples_per_epoch / FLAGS.batch_size
  if FLAGS.sync_replicas:
    # With sync replicas one aggregated step consumes replicas_to_aggregate
    # batches, so fewer global steps make up an epoch.
    steps_per_epoch /= FLAGS.replicas_to_aggregate

  decay_steps = int(steps_per_epoch * FLAGS.num_epochs_per_decay)

  if FLAGS.learning_rate_decay_type == 'exponential':
    learning_rate = tf.train.exponential_decay(
        FLAGS.learning_rate,
        global_step,
        decay_steps,
        FLAGS.learning_rate_decay_factor,
        staircase=True,
        name='exponential_decay_learning_rate')
  elif FLAGS.learning_rate_decay_type == 'fixed':
    learning_rate = tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')
  elif FLAGS.learning_rate_decay_type == 'polynomial':
    learning_rate = tf.train.polynomial_decay(
        FLAGS.learning_rate,
        global_step,
        decay_steps,
        FLAGS.end_learning_rate,
        power=1.0,
        cycle=False,
        name='polynomial_decay_learning_rate')
  else:
    raise ValueError('learning_rate_decay_type [%s] was not recognized' %
                     FLAGS.learning_rate_decay_type)

  if FLAGS.warmup_epochs:
    # Linear ramp from 0 up to the base rate; the min() hands control over to
    # the decayed schedule once the warmup value exceeds it.
    warmup_lr = (
        FLAGS.learning_rate * tf.cast(global_step, tf.float32) /
        (steps_per_epoch * FLAGS.warmup_epochs))
    learning_rate = tf.minimum(warmup_lr, learning_rate)
  return learning_rate
def _configure_optimizer(learning_rate):
  """Configures the optimizer used for training.

  Args:
    learning_rate: A scalar or `Tensor` learning rate.

  Returns:
    An instance of an optimizer.

  Raises:
    ValueError: if FLAGS.optimizer is not recognized.
  """
  # Map each supported optimizer name to a zero-argument factory so that only
  # the selected optimizer is ever constructed.
  factories = {
      'adadelta': lambda: tf.train.AdadeltaOptimizer(
          learning_rate,
          rho=FLAGS.adadelta_rho,
          epsilon=FLAGS.opt_epsilon),
      'adagrad': lambda: tf.train.AdagradOptimizer(
          learning_rate,
          initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value),
      'adam': lambda: tf.train.AdamOptimizer(
          learning_rate,
          beta1=FLAGS.adam_beta1,
          beta2=FLAGS.adam_beta2,
          epsilon=FLAGS.opt_epsilon),
      'ftrl': lambda: tf.train.FtrlOptimizer(
          learning_rate,
          learning_rate_power=FLAGS.ftrl_learning_rate_power,
          initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
          l1_regularization_strength=FLAGS.ftrl_l1,
          l2_regularization_strength=FLAGS.ftrl_l2),
      'momentum': lambda: tf.train.MomentumOptimizer(
          learning_rate,
          momentum=FLAGS.momentum,
          name='Momentum'),
      'rmsprop': lambda: tf.train.RMSPropOptimizer(
          learning_rate,
          decay=FLAGS.rmsprop_decay,
          momentum=FLAGS.rmsprop_momentum,
          epsilon=FLAGS.opt_epsilon),
      'sgd': lambda: tf.train.GradientDescentOptimizer(learning_rate),
  }
  if FLAGS.optimizer not in factories:
    raise ValueError('Optimizer [%s] was not recognized' % FLAGS.optimizer)
  return factories[FLAGS.optimizer]()
def _get_init_fn():
  """Returns a function run by the chief worker to warm-start the training.

  Note that the init_fn is only run when initializing the model during the very
  first global step.

  Returns:
    An init function run by the supervisor, or None when no warm-start
    checkpoint should be restored.
  """
  if FLAGS.checkpoint_path is None:
    return None

  # When a checkpoint already exists in train_dir, training resumes from it
  # and the warm-start checkpoint is deliberately ignored.
  if tf.train.latest_checkpoint(FLAGS.train_dir):
    tf.logging.info(
        'Ignoring --checkpoint_path because a checkpoint already exists in %s'
        % FLAGS.train_dir)
    return None

  excluded_scopes = []
  if FLAGS.checkpoint_exclude_scopes:
    excluded_scopes = [
        scope.strip() for scope in FLAGS.checkpoint_exclude_scopes.split(',')]

  # Keep every model variable whose op name does not fall under one of the
  # excluded scope prefixes.
  variables_to_restore = [
      var for var in slim.get_model_variables()
      if not any(var.op.name.startswith(scope) for scope in excluded_scopes)
  ]

  if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
    checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
  else:
    checkpoint_path = FLAGS.checkpoint_path

  tf.logging.info('Fine-tuning from %s' % checkpoint_path)

  return slim.assign_from_checkpoint_fn(
      checkpoint_path,
      variables_to_restore,
      ignore_missing_vars=FLAGS.ignore_missing_vars)
def _get_variables_to_train():
  """Returns a list of variables to train.

  Returns:
    A list of variables to train by the optimizer.
  """
  if FLAGS.trainable_scopes is None:
    # No filtering requested: every trainable variable is trained.
    return tf.trainable_variables()
  requested_scopes = [s.strip() for s in FLAGS.trainable_scopes.split(',')]
  # Flatten the trainable variables found under each requested scope,
  # preserving the order in which the scopes were listed.
  return [
      var
      for scope in requested_scopes
      for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
  ]
def main(_):
  """Builds the training graph from FLAGS and runs slim training.

  Args:
    _: unused positional argument supplied by tf.app.run().

  Raises:
    ValueError: if --dataset_dir is not supplied.
  """
  if not FLAGS.dataset_dir:
    raise ValueError('You must supply the dataset directory with --dataset_dir')

  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default():
    #######################
    # Config model_deploy #
    #######################
    deploy_config = model_deploy.DeploymentConfig(
        num_clones=FLAGS.num_clones,
        clone_on_cpu=FLAGS.clone_on_cpu,
        replica_id=FLAGS.task,
        num_replicas=FLAGS.worker_replicas,
        num_ps_tasks=FLAGS.num_ps_tasks)

    # Create global_step
    with tf.device(deploy_config.variables_device()):
      global_step = slim.create_global_step()

    ######################
    # Select the dataset #
    ######################
    dataset = dataset_factory.get_dataset(
        FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)

    ######################
    # Select the network #
    ######################
    network_fn = nets_factory.get_network_fn(
        FLAGS.model_name,
        num_classes=(dataset.num_classes - FLAGS.labels_offset),
        weight_decay=FLAGS.weight_decay,
        is_training=True)

    #####################################
    # Select the preprocessing function #
    #####################################
    preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
        preprocessing_name,
        is_training=True,
        use_grayscale=FLAGS.use_grayscale)

    ##############################################################
    # Create a dataset provider that loads data from the dataset #
    ##############################################################
    with tf.device(deploy_config.inputs_device()):
      provider = slim.dataset_data_provider.DatasetDataProvider(
          dataset,
          num_readers=FLAGS.num_readers,
          common_queue_capacity=20 * FLAGS.batch_size,
          common_queue_min=10 * FLAGS.batch_size)
      [image, label] = provider.get(['image', 'label'])
      # Shift labels so datasets with a background class can drop it.
      label -= FLAGS.labels_offset

      train_image_size = FLAGS.train_image_size or network_fn.default_image_size

      image = image_preprocessing_fn(image, train_image_size, train_image_size)

      images, labels = tf.train.batch(
          [image, label],
          batch_size=FLAGS.batch_size,
          num_threads=FLAGS.num_preprocessing_threads,
          capacity=5 * FLAGS.batch_size)
      labels = slim.one_hot_encoding(
          labels, dataset.num_classes - FLAGS.labels_offset)
      batch_queue = slim.prefetch_queue.prefetch_queue(
          [images, labels], capacity=2 * deploy_config.num_clones)

    ####################
    # Define the model #
    ####################
    def clone_fn(batch_queue):
      """Allows data parallelism by creating multiple clones of network_fn."""
      images, labels = batch_queue.dequeue()
      logits, end_points = network_fn(images)

      #############################
      # Specify the loss function #
      #############################
      if 'AuxLogits' in end_points:
        # Auxiliary head (e.g. Inception) contributes a down-weighted loss.
        slim.losses.softmax_cross_entropy(
            end_points['AuxLogits'], labels,
            label_smoothing=FLAGS.label_smoothing, weights=0.4,
            scope='aux_loss')
      slim.losses.softmax_cross_entropy(
          logits, labels, label_smoothing=FLAGS.label_smoothing, weights=1.0)
      return end_points

    # Gather initial summaries.
    summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

    clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
    first_clone_scope = deploy_config.clone_scope(0)
    # Gather update_ops from the first clone. These contain, for example,
    # the updates for the batch_norm variables created by network_fn.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)

    # Add summaries for end_points.
    end_points = clones[0].outputs
    for end_point in end_points:
      x = end_points[end_point]
      summaries.add(tf.summary.histogram('activations/' + end_point, x))
      summaries.add(tf.summary.scalar('sparsity/' + end_point,
                                      tf.nn.zero_fraction(x)))

    # Add summaries for losses.
    for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
      summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))

    # Add summaries for variables.
    for variable in slim.get_model_variables():
      summaries.add(tf.summary.histogram(variable.op.name, variable))

    #################################
    # Configure the moving averages #
    #################################
    if FLAGS.moving_average_decay:
      moving_average_variables = slim.get_model_variables()
      variable_averages = tf.train.ExponentialMovingAverage(
          FLAGS.moving_average_decay, global_step)
    else:
      moving_average_variables, variable_averages = None, None

    if FLAGS.quantize_delay >= 0:
      # Rewrite the graph for quantization-aware training.
      contrib_quantize.create_training_graph(quant_delay=FLAGS.quantize_delay)

    #########################################
    # Configure the optimization procedure. #
    #########################################
    with tf.device(deploy_config.optimizer_device()):
      learning_rate = _configure_learning_rate(dataset.num_samples, global_step)
      optimizer = _configure_optimizer(learning_rate)
      summaries.add(tf.summary.scalar('learning_rate', learning_rate))

    if FLAGS.sync_replicas:
      # If sync_replicas is enabled, the averaging will be done in the chief
      # queue runner.
      optimizer = tf.train.SyncReplicasOptimizer(
          opt=optimizer,
          replicas_to_aggregate=FLAGS.replicas_to_aggregate,
          total_num_replicas=FLAGS.worker_replicas,
          variable_averages=variable_averages,
          variables_to_average=moving_average_variables)
    elif FLAGS.moving_average_decay:
      # Update ops executed locally by trainer.
      update_ops.append(variable_averages.apply(moving_average_variables))

    # Variables to train.
    variables_to_train = _get_variables_to_train()

    # optimize_clones computes the total loss and per-variable gradients
    # averaged across all clones.
    total_loss, clones_gradients = model_deploy.optimize_clones(
        clones,
        optimizer,
        var_list=variables_to_train)
    # Add total_loss to summary.
    summaries.add(tf.summary.scalar('total_loss', total_loss))

    # Create gradient updates.
    grad_updates = optimizer.apply_gradients(clones_gradients,
                                             global_step=global_step)
    update_ops.append(grad_updates)

    # train_tensor evaluates the loss AND runs every update op as a side
    # effect of the control dependency.
    update_op = tf.group(*update_ops)
    with tf.control_dependencies([update_op]):
      train_tensor = tf.identity(total_loss, name='train_op')

    # Add the summaries from the first clone. These contain the summaries
    # created by model_fn and either optimize_clones() or _gather_clone_loss().
    summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
                                       first_clone_scope))

    # Merge all summaries together.
    summary_op = tf.summary.merge(list(summaries), name='summary_op')

    ###########################
    # Kicks off the training. #
    ###########################
    slim.learning.train(
        train_tensor,
        logdir=FLAGS.train_dir,
        master=FLAGS.master,
        is_chief=(FLAGS.task == 0),
        init_fn=_get_init_fn(),
        summary_op=summary_op,
        number_of_steps=FLAGS.max_number_of_steps,
        log_every_n_steps=FLAGS.log_every_n_steps,
        save_summaries_secs=FLAGS.save_summaries_secs,
        save_interval_secs=FLAGS.save_interval_secs,
        sync_optimizer=optimizer if FLAGS.sync_replicas else None)
# Script entry point: parse flags and dispatch to main().
if __name__ == '__main__':
  tf.app.run()
|
|
#!/usr/bin/env python
################################################################################
# Created by Oscar Martinez #
# o.rubi@esciencecenter.nl #
################################################################################
import time, multiprocessing,numpy
from pointcloud import utils
# Module containing common code for the DB queriers and loaders
def getSelectCols(columnsKeys, columnsNameDict, statistics=None, cas=False):
    """Return the comma-separated column list for a SELECT statement.

    Args:
        columnsKeys: ordered sequence of logical column keys.
        columnsNameDict: maps each key to a tuple whose first element is the
            DB column name.
        statistics: optional list of aggregate function names, parallel to
            columnsKeys; when given each column is emitted as
            ``stat(col) as statN``.
        cas: when True (and no statistics), alias each DB column back to its
            logical key (``col as key``).
    """
    selectColumns = []
    if statistics is None:
        for key in columnsKeys:
            columnDBName = columnsNameDict[key][0]
            if cas:
                selectColumns.append(columnDBName + ' as ' + key)
            else:
                selectColumns.append(columnDBName)
    else:
        # Wrap each column in its parallel aggregate and give it a stable alias.
        for i, key in enumerate(columnsKeys):
            columnDBName = columnsNameDict[key][0]
            selectColumns.append(statistics[i] + '(' + columnDBName + ') as stat' + str(i))
    return ','.join(selectColumns)
def createResultsTable(cursor, executeMethod, tableName, columnsKeys, columnsNameTypeDict, statistics=None):
    """Create the table that will hold query results.

    Args:
        cursor: DB cursor passed through to executeMethod.
        executeMethod: callable(cursor, sql) used to run the CREATE TABLE.
        tableName: name of the table to create.
        columnsKeys: ordered sequence of logical column keys.
        columnsNameTypeDict: maps each key to (dbColumnName, dbColumnType, ...).
        statistics: optional list of aggregate names parallel to columnsKeys;
            when given, each result column is named <stat><dbname>
            (e.g. "avgz") — NOTE(review): there is deliberately no separator,
            matching the historic column naming.
    """
    cs = []
    for i, key in enumerate(columnsKeys):
        columnData = columnsNameTypeDict[key]
        columnDBName = columnData[0]
        columnDBType = columnData[1]
        if statistics is None:
            cs.append(columnDBName + ' ' + columnDBType)
        else:
            cs.append(statistics[i] + columnDBName + ' ' + columnDBType)
    executeMethod(cursor, "create table " + tableName + " (" + ','.join(cs) + ")")
def getNumPoints(cursor, tableName):
    """Return the number of rows in tableName, or -1 if the query fails.

    NOTE(review): tableName is interpolated into the SQL text; callers must
    only pass trusted table names.
    """
    try:
        cursor.execute('select count(*) from ' + tableName)
        numpoints = int(cursor.fetchone()[0])
    except Exception:
        # Query failed (e.g. missing table): roll back so the connection
        # stays usable and signal the failure with -1. Was a bare except,
        # which also swallowed KeyboardInterrupt/SystemExit.
        numpoints = -1
        cursor.connection.rollback()
    return numpoints
def getStatistics(cursor, tableName, columnsNameDict, preComputed=True, columnsKeys=None, statistics=None):
    """Return the first row of statistics as a comma-joined string, or '-' on failure.

    Args:
        cursor: DB cursor.
        tableName: table holding (or from which to compute) the statistics.
        columnsNameDict: key -> (dbColumnName, ...) mapping (used only when
            preComputed is False).
        preComputed: when True the table already contains the statistics row;
            otherwise the aggregates are computed via getSelectCols.
        columnsKeys: logical column keys (only when preComputed is False).
        statistics: aggregate names parallel to columnsKeys (only when
            preComputed is False).
    """
    try:
        if preComputed:
            cursor.execute('select * from ' + tableName)
        else:
            cursor.execute('select ' + getSelectCols(columnsKeys, columnsNameDict, statistics) + ' from ' + tableName)
        result = ','.join(str(value) for value in cursor.fetchone())
    except Exception:
        # Was a bare except; keep the best-effort behavior but stop
        # swallowing KeyboardInterrupt/SystemExit.
        result = '-'
        cursor.connection.rollback()
    return result
def getResult(cursor, t0, tableName, columnsNameDict, preComputed=True, columnsKeys=None, statistics=None):
    """Return (elapsedTime, result) for a finished query.

    When statistics are requested the elapsed time includes fetching them;
    otherwise the time is taken before counting the rows (preserving the
    original measurement semantics of each branch).
    """
    if statistics is not None:
        result = getStatistics(cursor, tableName, columnsNameDict, preComputed, columnsKeys, statistics)
        eTime = time.time() - t0
    else:
        eTime = time.time() - t0
        result = getNumPoints(cursor, tableName)
    return (eTime, result)
def addZCondition(queryParameters, zColumnName, queryArgs):
    """Build the SQL condition constraining zColumnName to [minz, maxz].

    When queryParameters.hardcode is set the bounds are inlined literally in
    the SQL text; otherwise the placeholder pattern is emitted and the bound
    values are appended to queryArgs for later binding.
    """
    zconds = []
    if queryParameters.minz is not None:
        if queryParameters.hardcode:
            zconds.append(" " + zColumnName + " >= " + str(queryParameters.minz) + " ")
        else:
            zconds.append(" " + zColumnName + " >= " + queryParameters.pattern + " ")
            queryArgs.append(queryParameters.minz)
    if queryParameters.maxz is not None:
        if queryParameters.hardcode:
            zconds.append(" " + zColumnName + " <= " + str(queryParameters.maxz) + " ")
        else:
            zconds.append(" " + zColumnName + " <= " + queryParameters.pattern + " ")
            queryArgs.append(queryParameters.maxz)
    return ' AND '.join(zconds)
def addMortonCondition(queryParameters, mortonRanges, mortonColumnName, queryArgs):
    """Build an OR'ed SQL condition over a list of morton code ranges.

    Returns None when mortonRanges is empty. In placeholder mode the range
    bounds are appended to queryArgs for later binding.
    """
    elements = []
    for mortonRange in mortonRanges:
        if queryParameters.hardcode:
            elements.append('(' + mortonColumnName + ' between ' + str(mortonRange[0]) + ' and ' + str(mortonRange[1]) + ')')
        else:
            elements.append('(' + mortonColumnName + ' between ' + queryParameters.pattern + ' and ' + queryParameters.pattern + ')')
            queryArgs.extend(mortonRange)
    if elements:
        return '(' + ' OR '.join(elements) + ')'
    return None
def addBBoxCondition(queryParameters, xColumnName, yColumnName, queryArgs):
    """Return the SQL condition restricting (x, y) to the query bounding box."""
    if queryParameters.hardcode:
        # Inline the literal bounds directly into the SQL text.
        return "(%s between %s and %s) and (%s between %s and %s)" % (
            xColumnName, queryParameters.minx, queryParameters.maxx,
            yColumnName, queryParameters.miny, queryParameters.maxy)
    # Emit placeholders and collect the bound values for later binding.
    queryArgs.extend([queryParameters.minx, queryParameters.maxx,
                      queryParameters.miny, queryParameters.maxy])
    return "(%s between %s and %s) and (%s between %s and %s)" % (
        xColumnName, queryParameters.pattern, queryParameters.pattern,
        yColumnName, queryParameters.pattern, queryParameters.pattern)
def addBBoxCircleCondition(queryParameters, xColumnName, yColumnName, queryArgs):
    """SQL condition for the axis-aligned bounding box of the query circle
    (cheap prefilter before the exact circle test)."""
    xmin = queryParameters.cx - queryParameters.rad
    xmax = queryParameters.cx + queryParameters.rad
    ymin = queryParameters.cy - queryParameters.rad
    ymax = queryParameters.cy + queryParameters.rad
    template = "(%s between %s and %s) and (%s between %s and %s)"
    if queryParameters.hardcode:
        return template % (xColumnName, xmin, xmax, yColumnName, ymin, ymax)
    queryArgs.extend([xmin, xmax, ymin, ymax])
    p = queryParameters.pattern
    return template % (xColumnName, p, p, yColumnName, p, p)
def addCircleCondition(queryParameters, xColumnName, yColumnName, queryArgs):
    """SQL condition testing that (x, y) lies strictly inside the query
    circle.

    Uses the power() SQL function when queryParameters.powermethod is set,
    the ^ operator otherwise. Bind values go into queryArgs unless
    hardcoding.
    """
    if queryParameters.hardcode:
        cx = str(queryParameters.cx)
        cy = str(queryParameters.cy)
        rad = str(queryParameters.rad)
    else:
        queryArgs.extend([queryParameters.cx, queryParameters.cy, queryParameters.rad])
        cx = cy = rad = queryParameters.pattern
    if queryParameters.powermethod:
        return "(power(" + xColumnName + " - " + cx + ",2) + power(" + yColumnName + " - " + cy + ",2) < power(" + rad + ",2))"
    return "(((" + xColumnName + " - " + cx + ")^2) + ((" + yColumnName + " - " + cy + ")^2) < (" + rad + "^2))"
def addOrderByDistance(queryParameters, xColumnName, yColumnName, queryArgs):
    """ORDER BY clause sorting rows by squared distance to the query center
    (cx, cy); power() vs ^ is chosen by queryParameters.powermethod."""
    if queryParameters.hardcode:
        cx = str(queryParameters.cx)
        cy = str(queryParameters.cy)
    else:
        queryArgs.extend([queryParameters.cx, queryParameters.cy])
        cx = cy = queryParameters.pattern
    if queryParameters.powermethod:
        return " ORDER BY (power(" + xColumnName + " - " + cx + ",2) + power(" + yColumnName + " - " + cy + ",2))"
    return " ORDER BY (((" + xColumnName + " - " + cx + ")^2) + ((" + yColumnName + " - " + cy + ")^2))"
def addLimit(queryParameters, queryArgs):
    """Row-limiting clause: a ROWNUM filter on Oracle ('ora'), a LIMIT with
    a bind placeholder elsewhere (the count is appended to queryArgs)."""
    if queryParameters.db == 'ora':
        return " WHERE ROWNUM <= " + str(queryParameters.num)
    queryArgs.append(queryParameters.num)
    return " LIMIT " + queryParameters.pattern
def getWhereStatement(conditions, operator = ' AND '):
    """Join condition strings into a ' WHERE ... ' clause.

    Accepts a single condition or a list/tuple of them. Empty ('') and
    None entries are skipped -- addMortonCondition returns None when it is
    given no ranges, and the original `condition != ''` test let that None
    through, crashing str.join with a TypeError.
    Returns '' when no usable condition remains.
    """
    # isinstance instead of exact type comparison (accepts subclasses too)
    if not isinstance(conditions, (list, tuple)):
        conditions = [conditions, ]
    cs = [condition for condition in conditions if condition]
    if cs:
        return ' WHERE ' + (operator.join(cs)) + ' '
    return ''
def distinctTable(cursor, tableName, executeMethod):
    """Remove duplicate rows from tableName by renaming it aside, rebuilding
    it with SELECT DISTINCT, and dropping the renamed copy."""
    backupTable = 'DIRT_' + tableName
    statements = (
        'alter table ' + tableName + ' rename to ' + backupTable,
        'CREATE TABLE ' + tableName + ' AS SELECT DISTINCT * FROM ' + backupTable,
        'DROP TABLE ' + backupTable,
    )
    for statement in statements:
        executeMethod(cursor, statement)
def getSelect(queryParameters, flatTable, addContainsConditionMethod, columnsNameDict, hints = None):
    """Build the (query, queryArgs) pair for a query on a flat points table.

    queryParameters.queryType selects the shape:
      - 'rectangle': bounding-box + z filter only
      - 'circle'   : bbox prefilter of the circle, exact circle test on top
      - 'generic'  : bbox prefilter plus a backend-specific containment test
                     supplied by addContainsConditionMethod
      - 'nn'       : order by distance to the center, limited row count
    queryArgs collects bind values in placeholder order (empty when
    queryParameters.hardcode inlines literals).
    """
    queryArgs = []
    xname = columnsNameDict['x'][0]
    yname = columnsNameDict['y'][0]
    zname = columnsNameDict['z'][0]
    # rectangle/generic filter on the query's own bbox; circle/nn use the
    # bbox of the query circle as a cheap prefilter
    if queryParameters.queryType in ('rectangle', 'generic'):
        bBoxCondition = addBBoxCondition(queryParameters, xname, yname, queryArgs)
    else:
        bBoxCondition = addBBoxCircleCondition(queryParameters, xname, yname, queryArgs)
    zCondition = addZCondition(queryParameters, zname, queryArgs)
    # select list (and optional aggregates); getSelectCols is defined
    # elsewhere in this module
    cols = getSelectCols(queryParameters.columns, columnsNameDict, queryParameters.statistics)
    if hints == None:
        hints = ''
    if queryParameters.queryType == 'rectangle':
        query = "SELECT " + hints + cols + " FROM " + flatTable + getWhereStatement([bBoxCondition,zCondition])
    elif queryParameters.queryType == 'circle':
        # exact circle test applied on top of the bbox-prefiltered subquery
        specificCondition = addCircleCondition(queryParameters, xname, yname, queryArgs)
        query = "SELECT " + cols + " FROM (select " + hints + "* FROM " + flatTable + getWhereStatement([bBoxCondition,zCondition]) + ") b " + getWhereStatement(specificCondition)
    elif queryParameters.queryType == 'generic':
        (queryTable, specificCondition) = addContainsConditionMethod(queryParameters, queryArgs, xname, yname)
        if queryParameters.db != 'ora':
            # the prefiltered subquery is aliased 'ftf'; the helper may add
            # an extra table to join against
            tables = ['ftf']
            if queryTable != None:
                tables.append(queryTable)
            query = "SELECT " + cols + " FROM ( SELECT * FROM " + flatTable + getWhereStatement([bBoxCondition,zCondition]) + ") " + ",".join(tables) + getWhereStatement(specificCondition)
        else:
            # Oracle: exact containment via the sdo_PointInPolygon table
            # function over a cursor of prefiltered rows
            query = "SELECT " + cols + " from table ( sdo_PointInPolygon ( cursor ( select " + hints + "* FROM " + flatTable + getWhereStatement([bBoxCondition,zCondition]) + " ), " + specificCondition + "))"
    elif queryParameters.queryType == 'nn' :
        orderBy = addOrderByDistance(queryParameters, xname, yname, queryArgs)
        limit = addLimit(queryParameters, queryArgs)
        if queryParameters.db != 'ora':
            query = "SELECT " + cols + " FROM " + flatTable + getWhereStatement([bBoxCondition,zCondition]) + orderBy + limit
        else:
            # Oracle: addLimit emits a ROWNUM WHERE-clause, placed before the
            # ORDER BY here
            query = "SELECT " + cols + " FROM (SELECT " + hints + "* FROM " + flatTable + getWhereStatement([bBoxCondition,zCondition]) + " ) b " + limit + orderBy
    else:
        raise Exception('ERROR: ' + queryParameters.queryType + ' not supported!')
    return (query, queryArgs)
def getSelectMorton(iMortonRanges, xMortonRanges, queryParameters, flatTable, addContainsConditionMethod, columnsNameDict, hints = None):
    """Build the (query, queryArgs) pair for a query driven by Morton-code
    ranges.

    iMortonRanges cover cells fully inside the query region: their points
    need no geometric re-test and are prepended as a 'SELECT ... UNION '
    prefix. xMortonRanges only overlap the region, so their points are
    additionally re-tested with the exact geometry. 'nn' queries do not
    allow inner ranges.
    """
    queryArgs = []
    xname = columnsNameDict['x'][0]
    yname = columnsNameDict['y'][0]
    zname = columnsNameDict['z'][0]
    kname = columnsNameDict['k'][0]
    query = ''
    if hints == None:
        hints = ''
    if len(iMortonRanges):
        if queryParameters.queryType == 'nn':
            raise Exception('If using NN len(iMortonRanges) must be 0!')
        cols = getSelectCols(queryParameters.columns, columnsNameDict)
        inConditions = [
            addMortonCondition(queryParameters, iMortonRanges, kname, queryArgs),
            addZCondition(queryParameters, zname, queryArgs)]
        query = "SELECT " + cols + " FROM " + flatTable + getWhereStatement(inConditions) + " UNION "
    # BUG FIX: this part was wrongly inside an 'else:' branch, so whenever
    # inner Morton ranges existed the function returned only the dangling
    # 'SELECT ... UNION ' prefix (invalid SQL). The overlapping-range SELECT
    # must always be appended to the (possibly empty) prefix -- which is why
    # query is initialized to '' and '+=' is used below.
    cols = getSelectCols(queryParameters.columns, columnsNameDict, queryParameters.statistics)
    mortonCondition = addMortonCondition(queryParameters, xMortonRanges, kname, queryArgs)
    zCondition = addZCondition(queryParameters, zname, queryArgs)
    if queryParameters.queryType in ('rectangle', 'circle'):
        if queryParameters.queryType == 'rectangle' :
            specificCondition = addBBoxCondition(queryParameters, xname, yname, queryArgs)
        else:
            specificCondition = addCircleCondition(queryParameters, xname, yname, queryArgs)
        query += "SELECT " + hints + cols + " FROM (SELECT * FROM " + flatTable + getWhereStatement([mortonCondition,zCondition]) + ") a " + getWhereStatement(specificCondition)
    elif queryParameters.queryType == 'generic' :
        (queryTable, specificCondition) = addContainsConditionMethod(queryParameters,queryArgs, xname, yname)
        if queryParameters.db != 'ora':
            # subquery aliased 'ftf'; the helper may add an extra join table
            tables = ['ftf']
            if queryTable != None:
                tables.append(queryTable)
            query += "SELECT " + cols + " FROM (SELECT * FROM " + flatTable + getWhereStatement([mortonCondition,zCondition]) + ") " + ",".join(tables) + getWhereStatement(specificCondition)
        else:
            query += "SELECT " + hints + cols + " from table ( sdo_PointInPolygon ( cursor ( select " + hints + "* FROM " + flatTable + getWhereStatement([mortonCondition,zCondition]) + " ), " + specificCondition + "))"
    elif queryParameters.queryType == 'nn':
        orderBy = addOrderByDistance(queryParameters, xname, yname, queryArgs)
        limit = addLimit(queryParameters, queryArgs)
        if queryParameters.db != 'ora':
            query = "SELECT " + cols + " FROM " + flatTable + getWhereStatement([mortonCondition,zCondition]) + orderBy + limit
        else:
            query = "SELECT " + cols + " FROM (SELECT " + hints + "* FROM " + flatTable + getWhereStatement([mortonCondition,zCondition]) + " ) b " + limit + orderBy
    else:
        # Approximation: accept everything matched by the Morton/z filter
        query += "SELECT " + hints + cols + " FROM " + flatTable + getWhereStatement([mortonCondition,zCondition])
    return (query, queryArgs)
def genericQueryParallelGrid(cursor, queryMethod, executeMethod, columns, columnsNameTypeDict, statistics, resultTable, gridTable, createGridTableMethod, childMethod, numProcessesQuery, distinct, createSQLFileMethod, executeSQLFileCountMethod, streamConnectionString):
    """Run a region query in parallel by tiling it into an nrows x ncols grid.

    One child process per grid cell fills resultTable; the result is then
    optionally de-duplicated and either streamed out and counted
    (queryMethod == 'stream') or summarized via getResult.
    Returns (elapsedSeconds, result).
    """
    # BUG FIX: the original tested 'x' twice and never checked 'y', so a
    # distinct query missing the y column slipped through this guard.
    if distinct and (('x' not in columns) or ('y' not in columns) or ('z' not in columns)):
        raise Exception('GRID distinct requires to have access to columns x, y and z!')
    (nrows,ncols) = utils.getNRowNCol(numProcessesQuery)
    t0 = time.time()
    createResultsTable(cursor, executeMethod, resultTable, columns, columnsNameTypeDict, None)
    createGridTableMethod(cursor, gridTable, ncols, nrows)
    children = []
    for i in range(numProcessesQuery):
        children.append(multiprocessing.Process(target=childMethod, args=(i, gridTable)))
        children[-1].start()
    # wait for all children to finish their execution
    for i in range(numProcessesQuery):
        children[i].join()
    if distinct:
        distinctTable(cursor, resultTable, executeMethod)
    if queryMethod == 'stream':
        sqlFileName = str(resultTable) + '.sql'
        query = 'SELECT * FROM ' + resultTable
        createSQLFileMethod(cursor, sqlFileName, query, None)
        result = executeSQLFileCountMethod(streamConnectionString, sqlFileName)
        eTime = time.time() - t0
        return (eTime, result)
    else:
        return getResult(cursor, t0, resultTable, columnsNameTypeDict, False, columns, statistics)
def genericQueryParallelCand(cursor, queryMethod, executeMethod, columns, columnsNameTypeDict, statistics, resultTable, idsQuery, idsQueryArgs, childMethod, numProcessesQuery, createSQLFileMethod, executeSQLFileCountMethod, streamConnectionString):
    """Run a query in parallel over candidate block ids.

    idsQuery selects the candidate block ids (first result column); these are
    split into numProcessesQuery chunks, each handled by a child process that
    fills resultTable. Afterwards the result is either streamed out and
    counted (queryMethod == 'stream') or summarized via getResult.
    Returns (elapsedSeconds, result).
    """
    t0 = time.time()
    createResultsTable(cursor, executeMethod, resultTable, columns, columnsNameTypeDict, None)
    executeMethod(cursor, idsQuery, idsQueryArgs)
    # first column of every fetched row is the block id
    blkIds = numpy.array(cursor.fetchall())[:,0]
    children = []
    for chunkIds in numpy.array_split(blkIds, numProcessesQuery):
        children.append(multiprocessing.Process(target=childMethod,
            args=(chunkIds,)))
        children[-1].start()
    # wait for all children to finish their execution
    for i in range(numProcessesQuery):
        children[i].join()
    if queryMethod == 'stream':
        sqlFileName = str(resultTable) + '.sql'
        query = 'SELECT * FROM ' + resultTable
        createSQLFileMethod(cursor, sqlFileName, query, None)
        result = executeSQLFileCountMethod(streamConnectionString, sqlFileName)
        eTime = time.time() - t0
        return (eTime, result)
    else:
        return getResult(cursor, t0, resultTable, columnsNameTypeDict, False, columns, statistics)
def parallelMorton(iMortonRanges, xMortonRanges, childMethod, numProcessesQuery):
    """Fan the Morton ranges out over up to numProcessesQuery child processes.

    Both range lists are split into the same number of chunks; chunk i of the
    inner and overlapping ranges is handed to one childMethod(ichunk, xchunk)
    process. When iMortonRanges is None only the overlapping ranges drive the
    chunk count and empty inner chunks are passed.
    """
    if iMortonRanges is not None:
        numMRanges = max((len(iMortonRanges), len(xMortonRanges)))
        # never spawn more processes than there are ranges
        numChunks = min(numMRanges, numProcessesQuery)
        ichunks = numpy.array_split(iMortonRanges, numChunks)
    else:
        numMRanges = len(xMortonRanges)
        numChunks = min(numMRanges, numProcessesQuery)
        ichunks = numpy.array_split([], numChunks)
    xchunks = numpy.array_split(xMortonRanges, numChunks)
    children = []
    for i in range(numChunks):
        children.append(multiprocessing.Process(target=childMethod,
            args=(ichunks[i], xchunks[i])))
        children[-1].start()
    # wait for all children to finish their execution
    for child in children:
        child.join()
|
|
#!/usr/bin/env python
# pylint: disable-msg=W0404,W0622,W0704,W0613,W0152
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Generic Setup script, takes package info from __pkginfo__.py file.
"""
__docformat__ = "restructuredtext en"
import os
import sys
import shutil
from os.path import isdir, exists, join, walk
try:
if os.environ.get('NO_SETUPTOOLS'):
raise ImportError()
from setuptools import setup
from setuptools.command import install_lib
USE_SETUPTOOLS = 1
except ImportError:
from distutils.core import setup
from distutils.command import install_lib
USE_SETUPTOOLS = 0
sys.modules.pop('__pkginfo__', None)
# import required features
from __pkginfo__ import modname, version, license, description, \
web, author, author_email
# import optional features
import __pkginfo__
# Optional metadata: fall back to sensible defaults when __pkginfo__ does
# not define them.
distname = getattr(__pkginfo__, 'distname', modname)
scripts = getattr(__pkginfo__, 'scripts', [])
data_files = getattr(__pkginfo__, 'data_files', None)
subpackage_of = getattr(__pkginfo__, 'subpackage_of', None)
include_dirs = getattr(__pkginfo__, 'include_dirs', [])
ext_modules = getattr(__pkginfo__, 'ext_modules', None)
install_requires = getattr(__pkginfo__, 'install_requires', None)
dependency_links = getattr(__pkginfo__, 'dependency_links', [])
# directory names and file extensions never mirrored by export()
STD_BLACKLIST = ('CVS', '.svn', '.hg', 'debian', 'dist', 'build')
IGNORED_EXTENSIONS = ('.pyc', '.pyo', '.elc', '~')
# NOTE: file() is Python 2 only -- this setup script targets Python 2.
if exists('README'):
    long_description = file('README').read()
else:
    long_description = ''
def ensure_scripts(linux_scripts):
    """Creates the proper script names required for each platform
    (taken from 4Suite)
    """
    from distutils import util
    # non-Windows platforms use the script names unchanged
    if util.get_platform()[:3] != 'win':
        return linux_scripts
    return [script + '.bat' for script in linux_scripts]
def get_packages(directory, prefix):
    """return a list of subpackages for the given directory

    A subdirectory counts as a package when it contains an __init__.py or
    is named 'test'/'tests'; matches are returned dotted under `prefix`
    and recursed into.
    """
    result = []
    for package in os.listdir(directory):
        absfile = join(directory, package)
        if isdir(absfile):
            if exists(join(absfile, '__init__.py')) or \
                   package in ('test', 'tests'):
                if prefix:
                    result.append('%s.%s' % (prefix, package))
                else:
                    result.append(package)
                # result[-1] is the dotted name just appended; it becomes the
                # prefix for the recursive scan of this package
                result += get_packages(absfile, result[-1])
    return result
def export(from_dir, to_dir,
           blacklist=STD_BLACKLIST,
           ignore_ext=IGNORED_EXTENSIONS,
           verbose=True):
    """make a mirror of from_dir in to_dir, omitting directories and files
    listed in the black list

    NOTE: uses Python 2-only syntax (print >>, 'except E, e', os.path.walk).
    """
    def make_mirror(arg, directory, fnames):
        """walk handler"""
        # removing entries in place prunes walk()'s recursion into them
        for norecurs in blacklist:
            try:
                fnames.remove(norecurs)
            except ValueError:
                pass
        for filename in fnames:
            # don't include binary files
            if filename[-4:] in ignore_ext:
                continue
            # skip editor backup files
            if filename[-1] == '~':
                continue
            src = join(directory, filename)
            # rebase the path from from_dir onto to_dir
            dest = to_dir + src[len(from_dir):]
            if verbose:
                print >> sys.stderr, src, '->', dest
            if os.path.isdir(src):
                if not exists(dest):
                    os.mkdir(dest)
            else:
                # overwrite any stale copy before copying (copy2 keeps mtime)
                if exists(dest):
                    os.remove(dest)
                shutil.copy2(src, dest)
    try:
        os.mkdir(to_dir)
    except OSError, ex:
        # file exists ?
        import errno
        if ex.errno != errno.EEXIST:
            raise
    walk(from_dir, make_mirror, None)
EMPTY_FILE = '''"""generated file, don\'t modify or your data will be lost"""
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
pass
'''
class MyInstallLib(install_lib.install_lib):
    """extend install_lib command to handle package __init__.py and
    include_dirs variable if necessary
    """
    def run(self):
        """overridden from install_lib class"""
        install_lib.install_lib.run(self)
        # create Products.__init__.py if needed
        if subpackage_of:
            product_init = join(self.install_dir, subpackage_of, '__init__.py')
            if not exists(product_init):
                self.announce('creating %s' % product_init)
                stream = open(product_init, 'w')
                # EMPTY_FILE declares the namespace package
                stream.write(EMPTY_FILE)
                stream.close()
        # manually install included directories if any
        if include_dirs:
            if subpackage_of:
                # data lives under <subpackage>/<modname> in the install dir
                base = join(subpackage_of, modname)
            else:
                base = modname
            for directory in include_dirs:
                dest = join(self.install_dir, base, directory)
                export(directory, dest, verbose=False)
def install(**kwargs):
    """setup entry point

    Builds the package list and keyword arguments from the __pkginfo__
    globals and delegates to distutils/setuptools setup().
    """
    if USE_SETUPTOOLS:
        # distutils-only flag rejected by setuptools
        if '--force-manifest' in sys.argv:
            sys.argv.remove('--force-manifest')
    # install-layout option was introduced in 2.5.3-1~exp1
    elif sys.version_info < (2, 5, 4) and '--install-layout=deb' in sys.argv:
        sys.argv.remove('--install-layout=deb')
    if subpackage_of:
        # install as <subpackage>.<modname> (namespace package layout)
        package = subpackage_of + '.' + modname
        kwargs['package_dir'] = {package : '.'}
        packages = [package] + get_packages(os.getcwd(), package)
        if USE_SETUPTOOLS:
            kwargs['namespace_packages'] = [subpackage_of]
    else:
        kwargs['package_dir'] = {modname : '.'}
        packages = [modname] + get_packages(os.getcwd(), modname)
    if USE_SETUPTOOLS and install_requires:
        kwargs['install_requires'] = install_requires
        kwargs['dependency_links'] = dependency_links
    kwargs['packages'] = packages
    return setup(name = distname,
                 version = version,
                 license = license,
                 description = description,
                 long_description = long_description,
                 author = author,
                 author_email = author_email,
                 url = web,
                 scripts = ensure_scripts(scripts),
                 data_files = data_files,
                 ext_modules = ext_modules,
                 cmdclass = {'install_lib': MyInstallLib},
                 **kwargs
                 )
# run the installation when executed as a script
if __name__ == '__main__' :
    install()
|
|
"""Parameters module."""
import argparse
import simple_parsing
import os
import random
# import getpass
# import torch
# import torch.nn.parallel
# import torch.backends.cudnn as cudnn
# import torch.utils.data
from dataclasses import dataclass, field
from typing import ClassVar, Optional, Tuple
from simple_parsing import choice
@dataclass
class DatasetParams:
    """Dataset Parameters"""
    # NOTE(review): several per-field comments in the original were
    # copy-paste artifacts ("Use toy example", "number of data loading
    # workers" on unrelated fields); the descriptions below are inferred
    # from the field names -- confirm against the training code.
    default_root: ClassVar[str] = "/dataset" # the default root directory to use.
    dataset: str = "objects_folder_multi" # dataset name
    """ dataset name: [shapenet, objects_folder, objects_folder_multi] """
    root_dir: str = default_root # dataset root directory
    root_dir1: str = default_root # dataset root directory (variant/split 1)
    root_dir2: str = default_root # dataset root directory (variant/split 2)
    root_dir3: str = default_root # dataset root directory (variant/split 3)
    root_dir4: str = default_root # dataset root directory (variant/split 4)
    synsets: str = "" # Synsets from the shapenet dataset to use
    classes: str = "bowl" # Classes from the shapenet dataset to use #,cap,can,laptop
    workers: int = 0 # number of data loading workers
    light_change: int = 2000 # presumably: iterations between light changes
    toy_example: bool = False # Use toy example
    use_old_sign: bool = True # presumably: use the old sign convention
    use_quartic: bool = False # presumably: use a quartic falloff/kernel
    rescaled: bool = False # presumably: use rescaled data
    full_sphere_sampling: bool = False # presumably: sample camera poses on the full sphere
    full_sphere_sampling_light: bool = True # presumably: sample light positions on the full sphere
    random_rotation: bool = True # presumably: apply random rotations
    stoch_enc: bool = False # presumably: stochastic encoder
    only_background: bool = False # presumably: use only the background
    only_foreground: bool = False # presumably: use only the foreground
    rotate_foreground: bool = False # presumably: rotate the foreground object
    use_penality: bool = True # presumably: enable a penalty term (sic: "penality")
    use_mesh: bool = True # Render dataset with meshes
    gen_model_path: Optional[str] = None # presumably: pretrained generator checkpoint path
    gen_model_path2: Optional[str] = None # presumably: second pretrained generator checkpoint path
    dis_model_path: Optional[str] = None # presumably: pretrained discriminator checkpoint path
    dis_model_path2: Optional[str] = None # presumably: second pretrained discriminator checkpoint path
    bg_model: str = "../../../data/halfbox.obj" # Background model path
    gz_gi_loss: float = 0.0 # grad z and grad img consistency.
    pixel_samples: int = 1 # Samples per pixel.
@dataclass
class NetworkParams:
    """Network parameters (generator, discriminator and encoder)."""
    gen_type: str = choice(
        "dcgan", "mlp", "cnn", "resnet", default="dcgan"
    ) # One of: mlp, cnn, dcgan, resnet # try resnet :)
    gen_norm: str = choice(
        "batchnorm", "instancenorm", default="batchnorm"
    ) # One of: None, batchnorm, instancenorm
    ngf: int = 75 # number of features in the generator network
    nef: int = 65 # presumably: number of features in the encoder network (original comment was copy-pasted from ngf)
    gen_nextra_layers: int = 0 # number of extra layers in the generator network
    gen_bias_type: Optional[str] = choice(
        None, "plane", default=None
    ) # One of: None, plane
    netG: str = "" # path to netG (to continue training)
    netG2: str = "" # path to netG2 (normal generator to continue training)
    fix_splat_pos: bool = True # X and Y coordinates are fix
    zloss: float = 0.0 # use Z loss
    unit_normalloss: float = 0.0 # use unit_normal loss
    norm_sph_coord: bool = True # Use spherical coordinates for the normal
    max_gnorm: float = 500.0 # max grad norm to which it will be clipped (if exceeded)
    disc_type: str = choice("cnn", "dcgan", default="cnn") # One of: cnn, dcgan
    disc_norm: str = choice(
        "None", "batchnorm", "instancenorm", default="None"
    ) # One of: None, batchnorm, instancenorm
    ndf: int = 75 # number of features in the discriminator network
    disc_nextra_layers: int = 0 # number of extra layers in the discriminator network
    nz: int = 100 # size of the latent z vector
    netD: str = "" # path to netD (to continue training)
    netE: str = "" # path to netE (to continue training; original comment said netD -- copy-paste)
@dataclass
class OptimizerParams:
    """Optimization parameters"""
    optimizer: str = "adam" # Optimizer (adam, rmsprop)
    lr: float = 0.0001 # learning rate, default=0.0002
    lr_sched_type: str = "step" # Learning rate scheduler type.
    z_lr_sched_step: int = 100000 # Learning rate schedule for z.
    lr_iter: int = 10000 # Learning rate operation iterations
    normal_lr_sched_step: int = 100000 # Learning rate schedule for normal.
    z_lr_sched_gamma: float = 1.0 # Learning rate gamma for z.
    normal_lr_sched_gamma: float = 1.0 # Learning rate gamma for normal.
    normal_consistency_loss_weight: float = 1e-3 # Normal consistency loss weight.
    z_norm_weight_init: float = 1e-2 # presumably: initial z-norm loss weight (original comment was copy-pasted)
    z_norm_activate_iter: float = 1000 # presumably: iteration at which the z-norm loss activates (original comment was copy-pasted)
    spatial_var_loss_weight: float = 1e-2 # Spatial variance loss weight.
    grad_img_depth_loss: float = 2.0 # presumably: image/depth gradient consistency loss weight (original comment was copy-pasted)
    spatial_loss_weight: float = 0.5 # Spatial smoothness loss weight.
    beta1: float = 0.0 # beta1 for adam. default=0.5
    n_iter: int = 76201 # number of iterations to train
    batchSize: int = 4 # input batch size
    alt_opt_zn_interval: Optional[int] = None
    """ Alternating optimization interval.
    - None: joint optimization
    - 20: every 20 iterations, etc.
    """
    alt_opt_zn_start: int = 100000
    """Alternating optimization start iteration.
    - -1: starts immediately,
    - 100: starts alternating after the first 100 iterations.
    """
@dataclass
class GanParams:
    """GAN training parameters (criterion, gradient penalty, WGAN clamp)."""
    criterion: str = choice("GAN", "WGAN", default="WGAN") # GAN Training criterion
    gp: str = choice("None", "original", default="original") # Add gradient penalty
    gp_lambda: float = 10.0 # GP lambda
    critic_iters: int = 5 # Number of critic iterations
    clamp: float = 0.01 # clamp the weights for WGAN
@dataclass
class OtherParams:
    """Other parameters"""
    manualSeed: int = 1 # manual seed
    no_cuda: bool = False # if True, run without CUDA (original comment said "enables cuda", but the flag name is negated)
    ngpu: int = 1 # number of GPUs to use
    out_dir: str = "default_output" # output directory
    name: str = "" # presumably: experiment/run name
@dataclass
class CameraParams:
    """Camera Parameters"""
    cam_pos: Tuple[float, float, float] = (0.0, 0.0, 0.0) # Camera position.
    width: int = 128 # rendered image width in pixels
    height: int = 128 # rendered image height in pixels
    cam_dist: float = 3.0 # Camera distance from the center of the object
    nv: int = 10 # Number of views to generate
    angle: int = 30 # cam angle
    fovy: float = 30 # Field of view in the vertical direction.
    focal_length: float = 0.1 # focal length
    theta: Tuple[float, float] = (20, 80) # Angle in degrees from the z-axis.
    phi: Tuple[float, float] = (20, 70) # Angle in degrees from the x-axis.
    axis: Tuple[float, float, float] = (
        0.0,
        1.0,
        0.0,
    ) # Axis for random camera position.
    at: Tuple[float, float, float] = (0.05, 0.0, 0.0) # Camera lookat position.
    sphere_halfbox: bool = False # Renders demo sphere-halfbox
    norm_depth_image_only: bool = False # Render on the normalized depth image.
    mesh: bool = False # Render as mesh if enabled.
    test_cam_dist: bool = (
        False # Check if the images are consistent with a camera at a fixed distance.
    )
@dataclass
class RenderingParams:
    """Splat-rendering parameters."""
    splats_img_size: int = 128 # the height / width of the number of generator splats
    render_type: str = "img" # render the image or the depth map [img, depth]
    render_img_size: int = 128 # Width/height of the rendering image
    splats_radius: float = 0.05 # radius of the splats (fix)
    est_normals: bool = False # Estimate normals from splat positions.
    n_splats: Optional[int] = None # total number of splats; None until set elsewhere
    same_view: bool = False # before we add conditioning on cam pose, this is necessary
    """ data with view fixed """
    print_interval: int = 10 # Print loss interval.
    save_image_interval: int = 100 # Save image interval.
    save_interval: int = 5000 # Save state interval.
@dataclass
class Parameters:
    """base options."""

    # Every nested group uses field(default_factory=...) so each Parameters
    # instance owns a fresh sub-config. A bare `= DatasetParams()` default is
    # evaluated ONCE at class-definition time and shared by all instances,
    # so mutating one instance (as __post_init__ does with render_img_nc)
    # would silently mutate every other instance as well.
    # Dataset parameters.
    dataset: DatasetParams = field(default_factory=DatasetParams)
    # Set of parameters related to the optimizer.
    optimizer: OptimizerParams = field(default_factory=OptimizerParams)
    # GAN Settings
    gan: GanParams = field(default_factory=GanParams)
    # Camera settings
    camera: CameraParams = field(default_factory=CameraParams)
    # Rendering-related settings
    rendering: RenderingParams = field(default_factory=RenderingParams)
    # other (misc) settings
    other: OtherParams = field(default_factory=OtherParams)
    # NOTE(review): NetworkParams is defined in this module but not included
    # here -- confirm whether that is intentional.

    def __post_init__(self):
        """Post-initialization code"""
        # Make output folder
        # try:
        #     os.makedirs(self.other.out_dir)
        # except OSError:
        #     pass
        # Set render number of channels
        if self.rendering.render_type == "img":
            self.rendering.render_img_nc = 3
        elif self.rendering.render_type == "depth":
            self.rendering.render_img_nc = 1
        else:
            raise ValueError("Unknown rendering type")
        # # Set random seed
        # if self.other.manualSeed is None:
        #     self.other.manualSeed = random.randint(1, 10000)
        # print("Random Seed: ", self.other.manualSeed)
        # random.seed(self.other.manualSeed)
        # torch.manual_seed(self.other.manualSeed)
        # if not self.other.no_cuda:
        #     torch.cuda.manual_seed_all(self.other.manualSeed)
        # # Set number of splats param
        # self.rendering.n_splats = self.rendering.splats_img_size ** 2
        # # Check CUDA is selected
        # cudnn.benchmark = True
        # if torch.cuda.is_available() and self.other.no_cuda:
        #     print("WARNING: You have a CUDA device, so you should "
        #           "probably run with --cuda")

    @classmethod
    def parse(cls):
        """Parse command-line arguments into a Parameters instance."""
        parser = simple_parsing.ArgumentParser()
        parser.add_arguments(cls, dest="parameters")
        args = parser.parse_args()
        instance: Parameters = args.parameters
        return instance
# NOTE(review): parsing happens at module import time, so importing this
# module consumes sys.argv (and argparse may exit on bad arguments);
# consider guarding with `if __name__ == "__main__":`.
params = Parameters.parse()
print(params)
|
|
"""
Base typeclass for in-game Channels.
"""
from evennia.typeclasses.models import TypeclassBase
from evennia.comms.models import Msg, TempMsg, ChannelDB
from evennia.comms.managers import ChannelManager
from evennia.utils import logger
from evennia.utils.utils import make_iter
class DefaultChannel(ChannelDB):
"""
This is the base class for all Channel Comms. Inherit from this to
create different types of communication channels.
"""
# typeclass setup
__metaclass__ = TypeclassBase
objects = ChannelManager()
def at_first_save(self):
"""
Called by the typeclass system the very first time the channel
is saved to the database. Generally, don't overload this but
the hooks called by this method.
"""
self.at_channel_creation()
if hasattr(self, "_createdict"):
# this is only set if the channel was created
# with the utils.create.create_channel function.
cdict = self._createdict
if not cdict.get("key"):
if not self.db_key:
self.db_key = "#i" % self.dbid
elif cdict["key"] and self.key != cdict["key"]:
self.key = cdict["key"]
if cdict.get("keep_log"):
self.db_keep_log = cdict["keep_log"]
if cdict.get("aliases"):
self.aliases.add(cdict["aliases"])
if cdict.get("locks"):
self.locks.add(cdict["locks"])
if cdict.get("keep_log"):
self.attributes.add("keep_log", cdict["keep_log"])
if cdict.get("desc"):
self.attributes.add("desc", cdict["desc"])
    def at_channel_creation(self):
        """
        Called once, when the channel is first created.

        Overload this hook (rather than at_first_save, which calls it)
        to customize channel creation.
        """
        pass
# helper methods, for easy overloading
def has_connection(self, subscriber):
"""
Checks so this player is actually listening
to this channel.
Args:
subscriber (Player or Object): Entity to check.
Returns:
has_sub (bool): Whether the subscriber is subscribing to
this channel or not.
Notes:
This will first try Player subscribers and only try Object
if the Player fails.
"""
has_sub = self.subscriptions.has(subscriber)
if not has_sub and hasattr(subscriber, "player"):
# it's common to send an Object when we
# by default only allow Players to subscribe.
has_sub = self.subscriptions.has(subscriber.player)
return has_sub
def connect(self, subscriber):
"""
Connect the user to this channel. This checks access.
Args:
subscriber (Player or Object): the entity to subscribe
to this channel.
Returns:
success (bool): Whether or not the addition was
successful.
"""
# check access
if not self.access(subscriber, 'listen'):
return False
# pre-join hook
connect = self.pre_join_channel(subscriber)
if not connect:
return False
# subscribe
self.subscriptions.add(subscriber)
# post-join hook
self.post_join_channel(subscriber)
return True
def disconnect(self, subscriber):
"""
Disconnect entity from this channel.
Args:
subscriber (Player of Object): the
entity to disconnect.
Returns:
success (bool): Whether or not the removal was
successful.
"""
# pre-disconnect hook
disconnect = self.pre_leave_channel(subscriber)
if not disconnect:
return False
# disconnect
self.subscriptions.remove(subscriber)
# post-disconnect hook
self.post_leave_channel(subscriber)
return True
def access(self, accessing_obj, access_type='listen', default=False, no_superuser_bypass=False):
"""
Determines if another object has permission to access.
Args:
accessing_obj (Object): Object trying to access this one.
access_type (str, optional): Type of access sought.
default (bool, optional): What to return if no lock of access_type was found
no_superuser_bypass (bool, optional): Turns off superuser
lock bypass. Be careful with this one.
Returns:
return (bool): Result of lock check.
"""
return self.locks.check(accessing_obj, access_type=access_type,
default=default, no_superuser_bypass=no_superuser_bypass)
    def delete(self):
        """
        Deletes channel while also cleaning up channelhandler.
        """
        # clear attached data before removing the db row
        self.attributes.clear()
        self.aliases.clear()
        super(DefaultChannel, self).delete()
        # imported here rather than at module level -- presumably to avoid
        # a circular import with the channelhandler; confirm.
        from evennia.comms.channelhandler import CHANNELHANDLER
        CHANNELHANDLER.update()
def message_transform(self, msg, emit=False, prefix=True,
sender_strings=None, external=False):
"""
Generates the formatted string sent to listeners on a channel.
Args:
msg (str): Message to send.
emit (bool, optional): In emit mode the message is not associated
with a specific sender name.
prefix (bool, optional): Prefix `msg` with a text given by `self.channel_prefix`.
sender_strings (list, optional): Used by bots etc, one string per external sender.
external (bool, optional): If this is an external sender or not.
"""
if sender_strings or external:
body = self.format_external(msg, sender_strings, emit=emit)
else:
body = self.format_message(msg, emit=emit)
if prefix:
body = "%s%s" % (self.channel_prefix(msg, emit=emit), body)
msg.message = body
return msg
def distribute_message(self, msg, online=False):
"""
Method for grabbing all listeners that a message should be
sent to on this channel, and sending them a message.
msg (str): Message to distribute.
online (bool): Only send to receivers who are actually online
(not currently used):
"""
# get all players connected to this channel and send to them
for entity in self.subscriptions.all():
try:
# note our addition of the from_channel keyword here. This could be checked
# by a custom player.msg() to treat channel-receives differently.
entity.msg(msg.message, from_obj=msg.senders, from_channel=self.id)
except AttributeError, e:
logger.log_trace("%s\nCannot send msg to '%s'." % (e, entity))
    def msg(self, msgobj, header=None, senders=None, sender_strings=None,
            persistent=False, online=False, emit=False, external=False):
        """
        Send the given message to all players connected to channel. Note that
        no permission-checking is done here; it is assumed to have been
        done before calling this method. The optional keywords are not used if
        persistent is False.
        Args:
            msgobj (Msg, TempMsg or str): If a Msg/TempMsg, the remaining
                keywords will be ignored (since the Msg/TempMsg object already
                has all the data). If a string, this will either be sent as-is
                (if persistent=False) or it will be used together with `header`
                and `senders` keywords to create a Msg instance on the fly.
            header (str, optional): A header for building the message.
            senders (Object, Player or list, optional): Optional if persistent=False, used
                to build senders for the message.
            sender_strings (list, optional): Name strings of senders. Used for external
                connections where the sender is not a player or object.
                When this is defined, external will be assumed.
            persistent (bool, optional): Ignored if msgobj is a Msg or TempMsg.
                If True, a Msg will be created, using header and senders
                keywords. If False, other keywords will be ignored.
            online (bool, optional) - If this is set true, only messages people who are
                online. Otherwise, messages all players connected. This can
                make things faster, but may not trigger listeners on players
                that are offline.
            emit (bool, optional) - Signals to the message formatter that this message is
                not to be directly associated with a name.
            external (bool, optional): Treat this message as being
                agnostic of its sender.
        Returns:
            success (bool): Returns `True` if message sending was
                successful, `False` otherwise.
        """
        # normalize senders to a list
        if senders:
            senders = make_iter(senders)
        else:
            senders = []
        if isinstance(msgobj, basestring):
            # given msgobj is a string; build a Msg/TempMsg on the fly.
            # NOTE: `basestring` makes this branch Python 2-specific.
            msg = msgobj
            if persistent and self.db.keep_log:
                msgobj = Msg()
                msgobj.save()
            else:
                # Use TempMsg, so this message is not stored.
                msgobj = TempMsg()
            msgobj.header = header
            msgobj.message = msg
            msgobj.channels = [self]  # add this channel
            if not msgobj.senders:
                msgobj.senders = senders
        # pre-send hook may transform the message or veto the send entirely
        msgobj = self.pre_send_message(msgobj)
        if not msgobj:
            return False
        msgobj = self.message_transform(msgobj, emit=emit,
                                        sender_strings=sender_strings,
                                        external=external)
        self.distribute_message(msgobj, online=online)
        self.post_send_message(msgobj)
        return True
def tempmsg(self, message, header=None, senders=None):
"""
A wrapper for sending non-persistent messages.
Args:
message (str): Message to send.
header (str, optional): Header of message to send.
senders (Object or list, optional): Senders of message to send.
"""
self.msg(message, senders=senders, header=header, persistent=False)
# hooks
def channel_prefix(self, msg=None, emit=False):
"""
Hook method. How the channel should prefix itself for users.
Args:
msg (str, optional): Prefix text
emit (bool, optional): Switches to emit mode, which usually
means to ignore any sender information. Not used by default.
Returns:
prefix (str): The created channel prefix.
"""
return '[%s] ' % self.key
def format_senders(self, senders=None):
"""
Hook method. Function used to format a list of sender names.
Args:
senders (list): Sender object names.
Returns:
formatted_list (str): The list of names formatted appropriately.
Notes:
This function exists separately so that external sources
can use it to format source names in the same manner as
normal object/player names.
"""
if not senders:
return ''
return ', '.join(senders)
def pose_transform(self, msgobj, sender_string):
"""
Hook method. Detects if the sender is posing, and modifies the
message accordingly.
Args:
msgob (Msg or TempMsg): The message to analyze for a pose.
sender_string (str): The name of the sender/poser.
Returns:
string (str): A message that combines the `sender_string`
component with `msg` in different ways depending on if a
pose was performed or not (this must be analyzed by the
hook).
"""
pose = False
message = msgobj.message
message_start = message.lstrip()
if message_start.startswith((':', ';')):
pose = True
message = message[1:]
if not message.startswith((':', "'", ',')):
if not message.startswith(' '):
message = ' ' + message
if pose:
return '%s%s' % (sender_string, message)
else:
return '%s: %s' % (sender_string, message)
def format_external(self, msgobj, senders, emit=False):
"""
Hook method. Used for formatting external messages. This is
needed as a separate operation because the senders of external
messages may not be in-game objects/players, and so cannot
have things like custom user preferences.
Args:
msgobj (Msg or TempMsg): The message to send.
senders (list): Strings, one per sender.
emit (bool, optional): A sender-agnostic message or not.
Returns:
transformed (str): A formatted string.
"""
if emit or not senders:
return msgobj.message
senders = ', '.join(senders)
return self.pose_transform(msgobj, senders)
def format_message(self, msgobj, emit=False):
"""
Hook method. Formats a message body for display.
Args:
msgob (Msg or TempMsg): The message object to send.
emit (bool, optional): The message is agnostic of senders.
Returns:
transformed (str): The formatted message.
"""
# We don't want to count things like external sources as senders for
# the purpose of constructing the message string.
senders = [sender for sender in msgobj.senders if hasattr(sender, 'key')]
if not senders:
emit = True
if emit:
return msgobj.message
else:
senders = [sender.key for sender in msgobj.senders]
senders = ', '.join(senders)
return self.pose_transform(msgobj, senders)
    def pre_join_channel(self, joiner):
        """
        Hook method. Runs right before a channel is joined. If this
        returns a false value, channel joining is aborted.
        Args:
            joiner (object): The joining object.
        Returns:
            should_join (bool): If `False`, channel joining is aborted.
        """
        # Default implementation always allows joining; override to restrict.
        return True
    def post_join_channel(self, joiner):
        """
        Hook method. Runs right after an object or player joins a channel.
        Args:
            joiner (object): The joining object.
        """
        # No default behavior; override for custom join side effects.
        pass
    def pre_leave_channel(self, leaver):
        """
        Hook method. Runs right before a user leaves a channel. If this returns a false
        value, leaving the channel will be aborted.
        Args:
            leaver (object): The leaving object.
        Returns:
            should_leave (bool): If `False`, channel parting is aborted.
        """
        # Default implementation always allows leaving; override to restrict.
        return True
    def post_leave_channel(self, leaver):
        """
        Hook method. Runs right after an object or player leaves a channel.
        Args:
            leaver (object): The leaving object.
        """
        # No default behavior; override for custom leave side effects.
        pass
    def pre_send_message(self, msg):
        """
        Hook method. Runs before a message is sent to the channel and
        should return the message object, after any transformations.
        If the message is to be discarded, return a false value.
        Args:
            msg (Msg or TempMsg): Message to send.
        Returns:
            result (Msg, TempMsg or bool): If False, abort send.
        """
        # Default is a no-op pass-through; override to modify or veto messages.
        return msg
    def post_send_message(self, msg):
        """
        Hook method. Run after a message is sent to the channel.
        Args:
            msg (Msg or TempMsg): Message sent.
        """
        # No default behavior; override for e.g. logging or statistics.
        pass
    def at_init(self):
        """
        Hook method. This is always called whenever this channel is
        initiated -- that is, whenever its typeclass is cached from
        memory. This happens on-demand first time the channel is used
        or activated in some way after being created but also after
        each server restart or reload.
        """
        # No default behavior; override for per-load initialization.
        pass
|
|
"""
Support for magicseaweed data from magicseaweed.com.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.magicseaweed/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_API_KEY, CONF_NAME, CONF_MONITORED_CONDITIONS, ATTR_ATTRIBUTION)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
# Third-party client library pulled in by Home Assistant's loader.
REQUIREMENTS = ['magicseaweed==1.0.0']
_LOGGER = logging.getLogger(__name__)
# Configuration keys for this platform.
CONF_HOURS = 'hours'
CONF_SPOT_ID = 'spot_id'
CONF_UNITS = 'units'
# NOTE(review): CONF_UPDATE_INTERVAL and DEFAULT_UNIT are defined but never
# referenced below — candidates for removal or wiring up.
CONF_UPDATE_INTERVAL = 'update_interval'
DEFAULT_UNIT = 'us'
DEFAULT_NAME = 'MSW'
DEFAULT_ATTRIBUTION = "Data provided by magicseaweed.com"
ICON = 'mdi:waves'
# Valid hour labels for per-hour sensors (3-hour forecast steps).
HOURS = ['12AM', '3AM', '6AM', '9AM', '12PM', '3PM', '6PM', '9PM']
# sensor type -> [display name suffix]
SENSOR_TYPES = {
    'max_breaking_swell': ['Max'],
    'min_breaking_swell': ['Min'],
    'swell_forecast': ['Forecast'],
}
# Unit systems accepted by the MagicSeaweed API.
UNITS = ['eu', 'uk', 'us']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_MONITORED_CONDITIONS):
        vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
    vol.Required(CONF_API_KEY): cv.string,
    vol.Required(CONF_SPOT_ID): vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_HOURS, default=None):
        vol.All(cv.ensure_list, [vol.In(HOURS)]),
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_UNITS): vol.In(UNITS),
})
# Return cached results if last scan was less than this time ago.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=30)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Magicseaweed sensor platform."""
    name = config.get(CONF_NAME)
    spot_id = config[CONF_SPOT_ID]
    api_key = config[CONF_API_KEY]
    hours = config.get(CONF_HOURS)
    # An explicitly configured unit system wins; otherwise derive it from
    # Home Assistant's global unit system.
    if CONF_UNITS in config:
        units = config.get(CONF_UNITS)
    else:
        units = UNITS[0] if hass.config.units.is_metric else UNITS[2]
    forecast_data = MagicSeaweedData(
        api_key=api_key,
        spot_id=spot_id,
        units=units)
    forecast_data.update()
    # Abort platform setup when the initial fetch failed.
    if forecast_data.currently is None or forecast_data.hourly is None:
        return
    sensors = []
    for condition in config[CONF_MONITORED_CONDITIONS]:
        # One "current" sensor per monitored condition ...
        sensors.append(
            MagicSeaweedSensor(forecast_data, condition, name, units))
        # ... plus optional per-hour sensors (not for forecast summaries).
        if hours is not None and 'forecast' not in condition:
            sensors.extend(
                MagicSeaweedSensor(forecast_data, condition, name, units, hour)
                for hour in hours)
    add_entities(sensors, True)
class MagicSeaweedSensor(Entity):
    """Implementation of a MagicSeaweed sensor."""
    def __init__(self, forecast_data, sensor_type, name, unit_system,
                 hour=None):
        """Initialize the sensor."""
        self.client_name = name
        self.data = forecast_data
        # Hour label (e.g. '3PM') for hourly sensors; None for the current one.
        self.hour = hour
        self.type = sensor_type
        self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
        # Display-name suffix looked up from SENSOR_TYPES.
        self._name = SENSOR_TYPES[sensor_type][0]
        # NOTE(review): _icon is never read; the `icon` property returns ICON.
        self._icon = None
        self._state = None
        self._unit_system = unit_system
        self._unit_of_measurement = None
    @property
    def name(self):
        """Return the name of the sensor."""
        # Forecast sensors have no hour and no "Current" prefix.
        if self.hour is None and 'forecast' in self.type:
            return "{} {}".format(self.client_name, self._name)
        if self.hour is None:
            return "Current {} {}".format(self.client_name, self._name)
        return "{} {} {}".format(
            self.hour, self.client_name, self._name)
    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
    @property
    def unit_system(self):
        """Return the unit system of this entity."""
        return self._unit_system
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement
    @property
    def icon(self):
        """Return the entity weather icon, if any."""
        return ICON
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attrs
    def update(self):
        """Get the latest data from Magicseaweed and updates the states."""
        self.data.update()
        # Pick either the "current" forecast or the matching hourly one.
        if self.hour is None:
            forecast = self.data.currently
        else:
            forecast = self.data.hourly[self.hour]
        self._unit_of_measurement = forecast.swell_unit
        if self.type == 'min_breaking_swell':
            self._state = forecast.swell_minBreakingHeight
        elif self.type == 'max_breaking_swell':
            self._state = forecast.swell_maxBreakingHeight
        elif self.type == 'swell_forecast':
            summary = "{} - {}".format(
                forecast.swell_minBreakingHeight,
                forecast.swell_maxBreakingHeight)
            self._state = summary
            # Forecast summary also exposes each upcoming hour as an attribute.
            if self.hour is None:
                for hour, data in self.data.hourly.items():
                    occurs = hour
                    hr_summary = "{} - {} {}".format(
                        data.swell_minBreakingHeight,
                        data.swell_maxBreakingHeight,
                        data.swell_unit)
                    self._attrs[occurs] = hr_summary
        # Non-forecast sensors copy the raw forecast attributes through.
        if self.type != 'swell_forecast':
            self._attrs.update(forecast.attrs)
class MagicSeaweedData:
    """Fetch and cache the latest forecast data from MagicSeaweed."""
    def __init__(self, api_key, spot_id, units):
        """Initialize the data object."""
        import magicseaweed
        self._msw = magicseaweed.MSW_Forecast(api_key, spot_id,
                                              None, units)
        self.currently = None
        self.hourly = {}
        # Throttle so the API is queried at most once per interval.
        self.update = Throttle(MIN_TIME_BETWEEN_UPDATES)(self._update)
    def _update(self):
        """Get the latest data from MagicSeaweed."""
        try:
            forecasts = self._msw.get_future()
            # First entry is treated as the current conditions; the next
            # eight entries are indexed by their local hour label.
            self.currently = forecasts.data[0]
            for entry in forecasts.data[:8]:
                label = dt_util.utc_from_timestamp(
                    entry.localTimestamp).strftime("%-I%p")
                self.hourly[label] = entry
        except ConnectionError:
            _LOGGER.error("Unable to retrieve data from Magicseaweed")
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Shape of the optional per-call response hook: (response, deserialized, headers) -> Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Shared serializer for the request builders below; client-side validation is
# disabled because the service performs its own validation.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_available_stacks_request(
    *,
    os_type_selected: Optional[Union[str, "_models.Enum10"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for /providers/Microsoft.Web/availableStacks."""
    api_version = "2021-01-15"
    accept = "application/json"
    _url = kwargs.pop("template_url", '/providers/Microsoft.Web/availableStacks')
    # Query parameters
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if os_type_selected is not None:
        _params['osTypeSelected'] = _SERIALIZER.query("os_type_selected", os_type_selected, 'str')
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_function_app_stacks_request(
    *,
    stack_os_type: Optional[Union[str, "_models.Enum11"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for /providers/Microsoft.Web/functionAppStacks."""
    api_version = "2021-01-15"
    accept = "application/json"
    _url = kwargs.pop("template_url", '/providers/Microsoft.Web/functionAppStacks')
    # Query parameters
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if stack_os_type is not None:
        _params['stackOsType'] = _SERIALIZER.query("stack_os_type", stack_os_type, 'str')
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_function_app_stacks_for_location_request(
    location: str,
    *,
    stack_os_type: Optional[Union[str, "_models.Enum12"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for the per-location functionAppStacks endpoint."""
    api_version = "2021-01-15"
    accept = "application/json"
    # URL with the location substituted into the path template
    _url = kwargs.pop("template_url", '/providers/Microsoft.Web/locations/{location}/functionAppStacks')
    _url = _format_url_section(_url, location=_SERIALIZER.url("location", location, 'str'))
    # Query parameters
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if stack_os_type is not None:
        _params['stackOsType'] = _SERIALIZER.query("stack_os_type", stack_os_type, 'str')
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_web_app_stacks_for_location_request(
    location: str,
    *,
    stack_os_type: Optional[Union[str, "_models.Enum13"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for the per-location webAppStacks endpoint."""
    api_version = "2021-01-15"
    accept = "application/json"
    # URL with the location substituted into the path template
    _url = kwargs.pop("template_url", '/providers/Microsoft.Web/locations/{location}/webAppStacks')
    _url = _format_url_section(_url, location=_SERIALIZER.url("location", location, 'str'))
    # Query parameters
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if stack_os_type is not None:
        _params['stackOsType'] = _SERIALIZER.query("stack_os_type", stack_os_type, 'str')
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_operations_request(
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for /providers/Microsoft.Web/operations."""
    api_version = "2021-01-15"
    accept = "application/json"
    _url = kwargs.pop("template_url", '/providers/Microsoft.Web/operations')
    # Query parameters
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_web_app_stacks_request(
    *,
    stack_os_type: Optional[Union[str, "_models.Enum14"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for /providers/Microsoft.Web/webAppStacks."""
    api_version = "2021-01-15"
    accept = "application/json"
    _url = kwargs.pop("template_url", '/providers/Microsoft.Web/webAppStacks')
    # Query parameters
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if stack_os_type is not None:
        _params['stackOsType'] = _SERIALIZER.query("stack_os_type", stack_os_type, 'str')
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_available_stacks_on_prem_request(
    subscription_id: str,
    *,
    os_type_selected: Optional[Union[str, "_models.Enum15"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for the subscription-scoped availableStacks endpoint."""
    api_version = "2021-01-15"
    accept = "application/json"
    # URL with the subscription id substituted into the path template
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Web/availableStacks')
    _url = _format_url_section(
        _url, subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'))
    # Query parameters
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if os_type_selected is not None:
        _params['osTypeSelected'] = _SERIALIZER.query("os_type_selected", os_type_selected, 'str')
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class ProviderOperations(object):
"""ProviderOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2021_01_15.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to send requests.
        self._client = client
        # Request/response (de)serializers shared with the parent client.
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def get_available_stacks(
        self,
        os_type_selected: Optional[Union[str, "_models.Enum10"]] = None,
        **kwargs: Any
    ) -> Iterable["_models.ApplicationStackCollection"]:
        """Get available application frameworks and their versions.
        Description for Get available application frameworks and their versions.
        :param os_type_selected:
        :type os_type_selected: str or ~azure.mgmt.web.v2021_01_15.models.Enum10
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ApplicationStackCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_15.models.ApplicationStackCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationStackCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build either the first-page request or a follow-up from next_link.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_get_available_stacks_request(
                    os_type_selected=os_type_selected,
                    template_url=self.get_available_stacks.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_get_available_stacks_request(
                    os_type_selected=os_type_selected,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserialize one page into (next_link, iterator-of-items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("ApplicationStackCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetch one page, mapping non-200 responses to typed errors.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    get_available_stacks.metadata = {'url': '/providers/Microsoft.Web/availableStacks'}  # type: ignore
    @distributed_trace
    def get_function_app_stacks(
        self,
        stack_os_type: Optional[Union[str, "_models.Enum11"]] = None,
        **kwargs: Any
    ) -> Iterable["_models.FunctionAppStackCollection"]:
        """Get available Function app frameworks and their versions.
        Description for Get available Function app frameworks and their versions.
        :param stack_os_type: Stack OS Type.
        :type stack_os_type: str or ~azure.mgmt.web.v2021_01_15.models.Enum11
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either FunctionAppStackCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_15.models.FunctionAppStackCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FunctionAppStackCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build either the first-page request or a follow-up from next_link.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_get_function_app_stacks_request(
                    stack_os_type=stack_os_type,
                    template_url=self.get_function_app_stacks.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_get_function_app_stacks_request(
                    stack_os_type=stack_os_type,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserialize one page into (next_link, iterator-of-items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("FunctionAppStackCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetch one page, mapping non-200 responses to typed errors.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    get_function_app_stacks.metadata = {'url': '/providers/Microsoft.Web/functionAppStacks'}  # type: ignore
    @distributed_trace
    def get_function_app_stacks_for_location(
        self,
        location: str,
        stack_os_type: Optional[Union[str, "_models.Enum12"]] = None,
        **kwargs: Any
    ) -> Iterable["_models.FunctionAppStackCollection"]:
        """Get available Function app frameworks and their versions for location.
        Description for Get available Function app frameworks and their versions for location.
        :param location: Function App stack location.
        :type location: str
        :param stack_os_type: Stack OS Type.
        :type stack_os_type: str or ~azure.mgmt.web.v2021_01_15.models.Enum12
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either FunctionAppStackCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_15.models.FunctionAppStackCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FunctionAppStackCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build either the first-page request or a follow-up from next_link.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_get_function_app_stacks_for_location_request(
                    location=location,
                    stack_os_type=stack_os_type,
                    template_url=self.get_function_app_stacks_for_location.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_get_function_app_stacks_for_location_request(
                    location=location,
                    stack_os_type=stack_os_type,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserialize one page into (next_link, iterator-of-items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("FunctionAppStackCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetch one page, mapping non-200 responses to typed errors.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    get_function_app_stacks_for_location.metadata = {'url': '/providers/Microsoft.Web/locations/{location}/functionAppStacks'}  # type: ignore
    @distributed_trace
    def get_web_app_stacks_for_location(
        self,
        location: str,
        stack_os_type: Optional[Union[str, "_models.Enum13"]] = None,
        **kwargs: Any
    ) -> Iterable["_models.WebAppStackCollection"]:
        """Get available Web app frameworks and their versions for location.
        Description for Get available Web app frameworks and their versions for location.
        :param location: Web App stack location.
        :type location: str
        :param stack_os_type: Stack OS Type.
        :type stack_os_type: str or ~azure.mgmt.web.v2021_01_15.models.Enum13
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either WebAppStackCollection or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_15.models.WebAppStackCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.WebAppStackCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build either the first-page request or a follow-up from next_link.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_get_web_app_stacks_for_location_request(
                    location=location,
                    stack_os_type=stack_os_type,
                    template_url=self.get_web_app_stacks_for_location.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_get_web_app_stacks_for_location_request(
                    location=location,
                    stack_os_type=stack_os_type,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserialize one page into (next_link, iterator-of-items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("WebAppStackCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetch one page, mapping non-200 responses to typed errors.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    get_web_app_stacks_for_location.metadata = {'url': '/providers/Microsoft.Web/locations/{location}/webAppStacks'}  # type: ignore
    @distributed_trace
    def list_operations(
        self,
        **kwargs: Any
    ) -> Iterable["_models.CsmOperationCollection"]:
        """Gets all available operations for the Microsoft.Web resource provider. Also exposes resource
        metric definitions.
        Description for Gets all available operations for the Microsoft.Web resource provider. Also
        exposes resource metric definitions.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CsmOperationCollection or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_15.models.CsmOperationCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CsmOperationCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build either the first-page request or a follow-up from next_link.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_operations_request(
                    template_url=self.list_operations.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_operations_request(
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserialize one page into (next_link, iterator-of-items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("CsmOperationCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetch one page, mapping non-200 responses to typed errors.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_operations.metadata = {'url': '/providers/Microsoft.Web/operations'}  # type: ignore
@distributed_trace
def get_web_app_stacks(
    self,
    stack_os_type: Optional[Union[str, "_models.Enum14"]] = None,
    **kwargs: Any
) -> Iterable["_models.WebAppStackCollection"]:
    """Get available Web app frameworks and their versions.

    Description for Get available Web app frameworks and their versions.

    :param stack_os_type: Stack OS Type.
    :type stack_os_type: str or ~azure.mgmt.web.v2021_01_15.models.Enum14
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either WebAppStackCollection or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_15.models.WebAppStackCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WebAppStackCollection"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # Both the first request and continuations carry the same query
        # parameters; only the template URL differs.
        request = build_get_web_app_stacks_request(
            stack_os_type=stack_os_type,
            template_url=next_link or self.get_web_app_stacks.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        if next_link:
            # Continuation links are always fetched with GET.
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        deserialized = self._deserialize("WebAppStackCollection", pipeline_response)
        elements = deserialized.value
        if cls:
            elements = cls(elements)
        return deserialized.next_link or None, iter(elements)

    def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(get_next, extract_data)
get_web_app_stacks.metadata = {'url': '/providers/Microsoft.Web/webAppStacks'}  # type: ignore
@distributed_trace
def get_available_stacks_on_prem(
    self,
    os_type_selected: Optional[Union[str, "_models.Enum15"]] = None,
    **kwargs: Any
) -> Iterable["_models.ApplicationStackCollection"]:
    """Get available application frameworks and their versions.

    Description for Get available application frameworks and their versions.

    :param os_type_selected:
    :type os_type_selected: str or ~azure.mgmt.web.v2021_01_15.models.Enum15
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ApplicationStackCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_15.models.ApplicationStackCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationStackCollection"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page and continuations share the same parameters; only the
        # template URL differs.
        request = build_get_available_stacks_on_prem_request(
            subscription_id=self._config.subscription_id,
            os_type_selected=os_type_selected,
            template_url=next_link or self.get_available_stacks_on_prem.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        if next_link:
            # Continuation links are always fetched with GET.
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        deserialized = self._deserialize("ApplicationStackCollection", pipeline_response)
        elements = deserialized.value
        if cls:
            elements = cls(elements)
        return deserialized.next_link or None, iter(elements)

    def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(get_next, extract_data)
get_available_stacks_on_prem.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/availableStacks'}  # type: ignore
|
|
#!/usr/bin/env python
# Ansible module documentation. Note: the section key must be "options:"
# (plural) for ansible-doc to render the parameter table; "option:" was a
# typo that hid the mode parameter from the generated docs.
DOCUMENTATION = """
---
module: igw_purge
short_description: Provide a purge capability to remove an iSCSI gateway
environment
description:
  - This module handles the removal of a gateway configuration from a ceph
    environment.
    The playbook that calls this module prompts the user for the type of purge
    to perform.
    The purge options are;
    all ... purge all LIO configuration *and* delete all defined rbd images
    lio ... purge only the LIO configuration (rbd's are left intact)

    USE WITH CAUTION

    To support module debugging, this module logs to
    /var/log/ansible-module-igw_config.log on each target machine(s).

options:
  mode:
    description:
      - the mode defines the type of purge requested
        gateway ... remove the LIO configuration only
        disks ... remove the rbd disks defined to the gateway
    required: true

requirements: ['ceph-iscsi-config', 'python-rtslib']

author:
  - 'Paul Cuzner'

"""
import os
import logging
import socket
from logging.handlers import RotatingFileHandler
from ansible.module_utils.basic import *
import ceph_iscsi_config.settings as settings
from ceph_iscsi_config.common import Config
from ceph_iscsi_config.lio import LIO, Gateway
from ceph_iscsi_config.utils import ipv4_addresses, get_ip
__author__ = 'pcuzner@redhat.com'
def delete_group(module, image_list, cfg):
    """Delete a group of rbd images and drop them from the config object.

    :param module: AnsibleModule instance (passed through to delete_rbd)
    :param image_list: list of 'pool/image' paths to remove
    :param cfg: configuration object backed by the rados pool
    :return: list of images that were not processed
    """
    logger.debug("RBD Images to delete are : {}".format(','.join(image_list)))

    remaining = list(image_list)
    for rbd_path in image_list:
        delete_rbd(module, rbd_path)
        # config keys use 'pool.image' rather than 'pool/image'
        cfg.del_item('disks', rbd_path.replace('/', '.', 1))
        remaining.remove(rbd_path)
        cfg.changed = True

    if cfg.changed:
        cfg.commit()

    return remaining
def delete_rbd(module, rbd_path):
    """Delete a single rbd image via the rbd command line tool.

    Failures are logged but not raised; the caller decides how to react to
    images that could not be removed.

    :param module: AnsibleModule instance used to run the command
    :param rbd_path: 'pool/image' path of the rbd to delete
    """
    logger.debug("issuing delete for {}".format(rbd_path))
    rm_cmd = 'rbd --no-progress --conf {} rm {}'.format(settings.config.cephconf,
                                                        rbd_path)
    rc, rm_out, err = module.run_command(rm_cmd, use_unsafe_shell=True)
    # BUG FIX: the format string previously had only two placeholders for
    # three arguments, so stderr was silently dropped from the log message.
    logger.debug("delete RC = {}, {}, {}".format(rc, rm_out, err))
    if rc != 0:
        logger.error("Could not fully cleanup image {}. Manually run the rbd "
                     "command line tool to remove.".format(rbd_path))
def is_cleanup_host(config):
    """
    decide which gateway host should be responsible for any non-specific
    updates to the config object
    :param config: configuration dict from the rados pool
    :return: boolean indicating whether the additional cleanup should be
             performed by the running host
    """
    gateways = config.config["gateways"]
    if 'ip_list' not in gateways:
        return False

    # The host owning the first usable gateway IP performs the cleanup,
    # guaranteeing exactly one host does it.
    first_gw_ip = get_ip(gateways["ip_list"][0])
    return first_gw_ip != '0.0.0.0' and first_gw_ip in ipv4_addresses()
def ansible_main():
    """Module entry point: purge the LIO gateway config or the rbd disks.

    Runs in one of two modes (module parameter ``mode``):
      * ``gateway`` - drop this host's LIO target configuration and, on the
        designated cleanup host, remove the shared gateway/client entries.
      * ``disks``   - delete the rbd images owned by this host and their
        entries in the shared config object.
    Reports the result back to Ansible via exit_json/fail_json.
    """
    fields = {"mode": {"required": True,
                       "type": "str",
                       "choices": ["gateway", "disks"]
                       }
              }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    run_mode = module.params['mode']
    changes_made = False

    logger.info("START - GATEWAY configuration PURGE started, run mode "
                "is {}".format(run_mode))
    cfg = Config(logger)
    # short hostname (strip the domain part) is the key used in the config
    this_host = socket.gethostname().split('.')[0]
    # only one host performs the shared (non host-specific) cleanup
    perform_cleanup_tasks = is_cleanup_host(cfg)

    #
    # Purge gateway configuration, if the config has gateways
    if run_mode == 'gateway' and len(cfg.config['gateways'].keys()) > 0:

        lio = LIO()
        gateway = Gateway(cfg)

        # refuse to purge while initiators are still logged in
        if gateway.session_count() > 0:
            module.fail_json(msg="Unable to purge - gateway still has active "
                                 "sessions")

        gateway.drop_target(this_host)
        if gateway.error:
            module.fail_json(msg=gateway.error_msg)

        lio.drop_lun_maps(cfg, perform_cleanup_tasks)
        if lio.error:
            module.fail_json(msg=lio.error_msg)

        if gateway.changed or lio.changed:

            # each gateway removes it's own entry from the config
            cfg.del_item("gateways", this_host)

            if perform_cleanup_tasks:
                cfg.reset = True

                # drop all client definitions from the configuration object
                client_names = cfg.config["clients"].keys()
                for client in client_names:
                    cfg.del_item("clients", client)

                cfg.del_item("gateways", "iqn")
                cfg.del_item("gateways", "created")
                cfg.del_item("gateways", "ip_list")

            cfg.commit()
            changes_made = True

    elif run_mode == 'disks' and len(cfg.config['disks'].keys()) > 0:
        #
        # Remove the disks on this host, that have been registered in the
        # config object
        #
        # if the owner field for a disk is set to this host, this host can
        # safely delete it
        # nb. owner gets set at rbd allocation and mapping time
        images_left = []

        # delete_list will contain a list of pool/image names where the owner
        # is this host
        delete_list = [key.replace('.', '/', 1) for key in cfg.config['disks']
                       if cfg.config['disks'][key]['owner'] == this_host]

        if delete_list:
            images_left = delete_group(module, delete_list, cfg)

        # if the delete list still has entries we had problems deleting the
        # images
        if images_left:
            module.fail_json(msg="Problems deleting the following rbd's : "
                                 "{}".format(','.join(images_left)))

        changes_made = cfg.changed

    logger.debug("ending lock state variable {}".format(cfg.config_locked))

    logger.info("END - GATEWAY configuration PURGE complete")

    module.exit_json(changed=changes_made,
                     meta={"msg": "Purge of iSCSI settings ({}) "
                                  "complete".format(run_mode)})
if __name__ == '__main__':
    # Derive the logger name from the file name, stripping the
    # 'ansible_module_' prefix Ansible adds when copying the module over.
    module_name = os.path.basename(__file__).replace('ansible_module_', '')
    logger = logging.getLogger(os.path.basename(module_name))
    logger.setLevel(logging.DEBUG)
    # Log to a rotating file on the target host (5 MiB per file, 7 backups)
    # so remote module runs can be debugged after the fact.
    handler = RotatingFileHandler('/var/log/ansible-module-igw_config.log',
                                  maxBytes=5242880,
                                  backupCount=7)
    log_fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)-8s : '
                                '%(message)s')
    handler.setFormatter(log_fmt)
    logger.addHandler(handler)

    # Load ceph-iscsi-config settings before performing the purge.
    settings.init()
    ansible_main()
|
|
#!/usr/bin/env python
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# XenAPI plugin for reading/writing information to xenstore
#
try:
import json
except ImportError:
import simplejson as json
import logging
import os
import subprocess
import XenAPIPlugin
import pluginlib_nova as pluginlib
pluginlib.configure_logging("xenstore")
class XenstoreError(pluginlib.PluginError):
    """Errors that occur when calling xenstore-* through subprocesses"""

    def __init__(self, cmd, return_code, stderr, stdout):
        # Keep the individual pieces available to callers that want to
        # inspect the failure programmatically.
        self.cmd = cmd
        self.return_code = return_code
        self.stderr = stderr
        self.stdout = stdout
        message = ("cmd: %s; returncode: %d; stderr: %s; stdout: %s"
                   % (cmd, return_code, stderr, stdout))
        pluginlib.PluginError.__init__(self, message)
def jsonify(fnc):
    """Decorator ensuring the wrapped function's return value is valid JSON.

    Values that already parse as JSON pass through untouched; raw string
    values are json-encoded before being returned.
    """
    def wrapper(*args, **kwargs):
        result = fnc(*args, **kwargs)
        try:
            json.loads(result)
        except ValueError:
            # Value should already be JSON-encoded, but some operations
            # may write raw string values; this catches those and
            # properly encodes them.
            result = json.dumps(result)
        return result
    return wrapper
def _record_exists(arg_dict):
    """Returns whether or not the given record exists. The record path
    is determined from the given path and dom_id in the arg_dict."""
    cmd = ["xenstore-exists", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
    try:
        ret, result = _run_command(cmd)
    # Modernized from the Python-2-only "except XenstoreError, e" syntax;
    # "as" is valid on Python 2.6+ and required on Python 3.
    except XenstoreError as e:
        # NOTE(review): on Python 3, subprocess gives stderr back as bytes,
        # so the '' comparison assumes Python 2 str -- confirm when porting.
        if e.stderr == '':
            # if stderr was empty, this just means the path did not exist
            return False
        # otherwise there was a real problem
        raise
    return True
@jsonify
def read_record(self, arg_dict):
    """Returns the value stored at the given path for the given dom_id.
    These must be encoded as key/value pairs in arg_dict. You can
    optionally include a key 'ignore_missing_path'; if this is present
    and boolean True, attempting to read a non-existent path will return
    the string 'None' instead of raising an exception.
    """
    cmd = ["xenstore-read", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
    try:
        ret, result = _run_command(cmd)
        return result.strip()
    # Modernized from the Python-2-only "except XenstoreError, e" syntax;
    # the exception object was never used here, so the binding is dropped.
    except XenstoreError:
        if not arg_dict.get("ignore_missing_path", False):
            raise
        if not _record_exists(arg_dict):
            return "None"
        # Just try again in case the agent write won the race against
        # the record_exists check. If this fails again, it will likely raise
        # an equally meaningful XenstoreError as the one we just caught
        ret, result = _run_command(cmd)
        return result.strip()
@jsonify
def write_record(self, arg_dict):
    """Writes to xenstore at the specified path. If there is information
    already stored in that location, it is overwritten. As in read_record,
    the dom_id and path must be specified in the arg_dict; additionally,
    you must specify a 'value' key, whose value must be a string. Typically,
    you can json-ify more complex values and store the json output.
    """
    record_path = "/local/domain/%(dom_id)s/%(path)s" % arg_dict
    _run_command(["xenstore-write", record_path, arg_dict["value"]])
    return arg_dict["value"]
@jsonify
def list_records(self, arg_dict):
    """Returns all the stored data at or below the given path for the
    given dom_id. The data is returned as a json-ified dict, with the
    path (relative to the requested base path) as the key and the stored
    value as the value. If the path doesn't exist, an empty dict is
    returned.
    """
    dirpath = "/local/domain/%(dom_id)s/%(path)s" % arg_dict
    cmd = ["xenstore-ls", dirpath.rstrip("/")]
    try:
        ret, recs = _run_command(cmd)
    # Modernized from the Python-2-only "except XenstoreError, e" syntax;
    # the exception object was never used here, so the binding is dropped.
    except XenstoreError:
        if not _record_exists(arg_dict):
            return {}
        # Just try again in case the path was created in between
        # the "ls" and the existence check. If this fails again, it will
        # likely raise an equally meaningful XenstoreError
        ret, recs = _run_command(cmd)
    base_path = arg_dict["path"]
    paths = _paths_from_ls(recs)
    ret = {}
    for path in paths:
        if base_path:
            arg_dict["path"] = "%s/%s" % (base_path, path)
        else:
            arg_dict["path"] = path
        rec = read_record(self, arg_dict)
        try:
            val = json.loads(rec)
        except ValueError:
            val = rec
        ret[path] = val
    return ret
@jsonify
def delete_record(self, arg_dict):
    """Just like it sounds: it removes the record for the specified
    VM and the specified path from xenstore.
    """
    record_path = "/local/domain/%(dom_id)s/%(path)s" % arg_dict
    ret, result = _run_command(["xenstore-rm", record_path])
    return result
def _paths_from_ls(recs):
"""The xenstore-ls command returns a listing that isn't terribly
useful. This method cleans that up into a dict with each path
as the key, and the associated string as the value.
"""
ret = {}
last_nm = ""
level = 0
path = []
ret = []
for ln in recs.splitlines():
nm, val = ln.rstrip().split(" = ")
barename = nm.lstrip()
this_level = len(nm) - len(barename)
if this_level == 0:
ret.append(barename)
level = 0
path = []
elif this_level == level:
# child of same parent
ret.append("%s/%s" % ("/".join(path), barename))
elif this_level > level:
path.append(last_nm)
ret.append("%s/%s" % ("/".join(path), barename))
level = this_level
elif this_level < level:
path = path[:this_level]
ret.append("%s/%s" % ("/".join(path), barename))
level = this_level
last_nm = barename
return ret
def _run_command(cmd):
"""Abstracts out the basics of issuing system commands. If the command
returns anything in stderr, a PluginError is raised with that information.
Otherwise, a tuple of (return code, stdout data) is returned.
"""
logging.info(' '.join(cmd))
pipe = subprocess.PIPE
proc = subprocess.Popen(cmd, stdin=pipe, stdout=pipe, stderr=pipe,
close_fds=True)
out, err = proc.communicate()
if proc.returncode is not os.EX_OK:
raise XenstoreError(cmd, proc.returncode, err, out)
return proc.returncode, out
if __name__ == "__main__":
XenAPIPlugin.dispatch(
{"read_record": read_record,
"write_record": write_record,
"list_records": list_records,
"delete_record": delete_record})
|
|
from contextlib import contextmanager
from ctypes import (CDLL, c_bool, c_int, c_int32, c_int64, c_double, c_char_p,
c_char, POINTER, Structure, c_void_p, create_string_buffer)
from warnings import warn
import numpy as np
from numpy.ctypeslib import as_array
from openmc.exceptions import AllocationError
from . import _dll
from .error import _error_handler
import openmc.capi
class _Bank(Structure):
    # ctypes mirror of the library's source-bank site record. Field order
    # and types must match the native layout exactly -- do not reorder.
    _fields_ = [('r', c_double*3),            # position
                ('u', c_double*3),            # direction
                ('E', c_double),              # energy
                ('wgt', c_double),            # statistical weight
                ('delayed_group', c_int),
                ('particle', c_int)]
# Define input type for numpy arrays that will be passed into C++ functions
# Must be an int or double array, with single dimension that is contiguous.
# ndpointer performs the dtype/ndim/flags check at call time, so passing a
# non-contiguous or wrongly-typed array raises TypeError instead of
# corrupting memory.
_array_1d_int = np.ctypeslib.ndpointer(dtype=np.int32, ndim=1,
                                       flags='CONTIGUOUS')
_array_1d_dble = np.ctypeslib.ndpointer(dtype=np.double, ndim=1,
                                        flags='CONTIGUOUS')
# Configure argument types, return types and automatic error checking for
# every libopenmc entry point used by this module. ``errcheck`` converts
# non-zero C return codes into Python exceptions via _error_handler.
_dll.openmc_calculate_volumes.restype = c_int
_dll.openmc_calculate_volumes.errcheck = _error_handler
_dll.openmc_finalize.restype = c_int
_dll.openmc_finalize.errcheck = _error_handler
_dll.openmc_find_cell.argtypes = [POINTER(c_double*3), POINTER(c_int32),
                                  POINTER(c_int32)]
_dll.openmc_find_cell.restype = c_int
_dll.openmc_find_cell.errcheck = _error_handler
_dll.openmc_hard_reset.restype = c_int
_dll.openmc_hard_reset.errcheck = _error_handler
_dll.openmc_init.argtypes = [c_int, POINTER(POINTER(c_char)), c_void_p]
_dll.openmc_init.restype = c_int
_dll.openmc_init.errcheck = _error_handler
_dll.openmc_get_keff.argtypes = [POINTER(c_double*2)]
_dll.openmc_get_keff.restype = c_int
_dll.openmc_get_keff.errcheck = _error_handler
_init_linsolver_argtypes = [_array_1d_int, c_int, _array_1d_int, c_int, c_int,
                            c_double, _array_1d_int, _array_1d_int]
_dll.openmc_initialize_linsolver.argtypes = _init_linsolver_argtypes
_dll.openmc_initialize_linsolver.restype = None
_dll.openmc_master.restype = c_bool
_dll.openmc_next_batch.argtypes = [POINTER(c_int)]
_dll.openmc_next_batch.restype = c_int
_dll.openmc_next_batch.errcheck = _error_handler
_dll.openmc_plot_geometry.restype = c_int
# BUG FIX: this line previously re-assigned ``restype`` (clobbering the
# c_int set just above) instead of installing the error handler as
# ``errcheck`` like every other function in this table.
_dll.openmc_plot_geometry.errcheck = _error_handler
_dll.openmc_run.restype = c_int
_dll.openmc_run.errcheck = _error_handler
_dll.openmc_reset.restype = c_int
_dll.openmc_reset.errcheck = _error_handler
_run_linsolver_argtypes = [_array_1d_dble, _array_1d_dble, _array_1d_dble,
                           c_double]
_dll.openmc_run_linsolver.argtypes = _run_linsolver_argtypes
_dll.openmc_run_linsolver.restype = c_int
_dll.openmc_source_bank.argtypes = [POINTER(POINTER(_Bank)), POINTER(c_int64)]
_dll.openmc_source_bank.restype = c_int
_dll.openmc_source_bank.errcheck = _error_handler
_dll.openmc_simulation_init.restype = c_int
_dll.openmc_simulation_init.errcheck = _error_handler
_dll.openmc_simulation_finalize.restype = c_int
_dll.openmc_simulation_finalize.errcheck = _error_handler
_dll.openmc_statepoint_write.argtypes = [c_char_p, POINTER(c_bool)]
_dll.openmc_statepoint_write.restype = c_int
_dll.openmc_statepoint_write.errcheck = _error_handler
def calculate_volumes():
    """Run stochastic volume calculation.

    Thin wrapper over the shared-library ``openmc_calculate_volumes`` call;
    C-side errors surface as Python exceptions via the errcheck handler.
    """
    _dll.openmc_calculate_volumes()
def current_batch():
    """Return the current batch of the simulation.

    Reads the ``current_batch`` global variable exported by libopenmc.

    Returns
    -------
    int
        Current batch of the simulation

    """
    return c_int.in_dll(_dll, 'current_batch').value
def finalize():
    """Finalize simulation and free memory.

    Wraps the shared-library ``openmc_finalize`` call.
    """
    _dll.openmc_finalize()
def find_cell(xyz):
    """Find the cell at a given point

    Parameters
    ----------
    xyz : iterable of float
        Cartesian coordinates of position

    Returns
    -------
    openmc.capi.Cell
        Cell containing the point
    int
        If the cell at the given point is repeated in the geometry, this
        indicates which instance it is, i.e., 0 would be the first instance.

    """
    # index and instance are output parameters filled in by the C call.
    index = c_int32()
    instance = c_int32()
    _dll.openmc_find_cell((c_double*3)(*xyz), index, instance)
    return openmc.capi.Cell(index=index.value), instance.value
def find_material(xyz):
    """Find the material at a given point

    Parameters
    ----------
    xyz : iterable of float
        Cartesian coordinates of position

    Returns
    -------
    openmc.capi.Material or None
        Material containing the point, or None is no material is found

    """
    # Locate the cell first; index and instance are output parameters.
    index = c_int32()
    instance = c_int32()
    _dll.openmc_find_cell((c_double*3)(*xyz), index, instance)

    # The cell fill is either a single material (or None), or -- when the
    # fill is distributed -- a sequence indexed by the cell instance.
    mats = openmc.capi.Cell(index=index.value).fill
    if isinstance(mats, (openmc.capi.Material, type(None))):
        return mats
    else:
        return mats[instance.value]
def hard_reset():
    """Reset tallies, timers, and pseudo-random number generator state.

    Wraps the shared-library ``openmc_hard_reset`` call.
    """
    _dll.openmc_hard_reset()
def init(args=None, intracomm=None):
    """Initialize OpenMC

    Parameters
    ----------
    args : list of str
        Command-line arguments; the program name 'openmc' is prepended
        automatically before they are handed to the library.
    intracomm : mpi4py.MPI.Intracomm or None
        MPI intracommunicator

    """
    if args is not None:
        args = ['openmc'] + list(args)
        argc = len(args)

        # Create the argv array. Note that it is actually expected to be of
        # length argc + 1 with the final item being a null pointer.
        argv = (POINTER(c_char) * (argc + 1))()
        for i, arg in enumerate(args):
            argv[i] = create_string_buffer(arg.encode())
    else:
        argc = 0
        argv = None

    if intracomm is not None:
        # If an mpi4py communicator was passed, convert it to void* to be passed
        # to openmc_init. If mpi4py is unavailable the communicator is
        # silently dropped and the library initializes without it.
        try:
            from mpi4py import MPI
        except ImportError:
            intracomm = None
        else:
            address = MPI._addressof(intracomm)
            intracomm = c_void_p(address)

    _dll.openmc_init(argc, argv, intracomm)
def iter_batches():
    """Iterator over batches.

    This function returns a generator-iterator that allows Python code to be run
    between batches in an OpenMC simulation. It should be used in conjunction
    with :func:`openmc.capi.simulation_init` and
    :func:`openmc.capi.simulation_finalize`. For example:

    .. code-block:: Python

        with openmc.capi.run_in_memory():
            openmc.capi.simulation_init()
            for _ in openmc.capi.iter_batches():
                # Look at convergence of tallies, for example
                ...
            openmc.capi.simulation_finalize()

    See Also
    --------
    openmc.capi.next_batch

    """
    status = 0
    while status == 0:
        # Run the next batch, then hand control back to the caller before
        # the loop condition checks whether the simulation has finished
        # (non-zero status = maximum batches or tally triggers reached).
        status = next_batch()
        yield
def keff():
    """Return the calculated k-eigenvalue and its standard deviation.

    Returns
    -------
    tuple
        Mean k-eigenvalue and standard deviation of the mean

    """
    n = openmc.capi.num_realizations()
    if n > 3:
        # Use the combined estimator if there are enough realizations;
        # openmc_get_keff fills the two-element (mean, std-dev) array.
        k = (c_double*2)()
        _dll.openmc_get_keff(k)
        return tuple(k)
    else:
        # Otherwise, return the tracklength estimator read straight from
        # the library globals. The standard deviation is undefined for a
        # single realization, hence the inf fallback.
        mean = c_double.in_dll(_dll, 'keff').value
        std_dev = c_double.in_dll(_dll, 'keff_std').value \
            if n > 1 else np.inf
        return (mean, std_dev)
def master():
    """Return whether processor is master processor or not.

    Wraps the shared-library ``openmc_master`` call.

    Returns
    -------
    bool
        Whether is master processor or not

    """
    return _dll.openmc_master()
def next_batch():
    """Run next batch.

    Returns
    -------
    int
        Status after running a batch (0=normal, 1=reached maximum number of
        batches, 2=tally triggers reached)

    """
    # status is an output parameter populated by the C call.
    status = c_int()
    _dll.openmc_next_batch(status)
    return status.value
def plot_geometry():
    """Plot geometry.

    Wraps the shared-library ``openmc_plot_geometry`` call.
    """
    _dll.openmc_plot_geometry()
def reset():
    """Reset tallies and timers.

    Wraps the shared-library ``openmc_reset`` call.
    """
    _dll.openmc_reset()
def run():
    """Run simulation.

    Wraps the shared-library ``openmc_run`` call.
    """
    _dll.openmc_run()
def simulation_init():
    """Initialize simulation.

    Wraps the shared-library ``openmc_simulation_init`` call.
    """
    _dll.openmc_simulation_init()
def simulation_finalize():
    """Finalize simulation.

    Wraps the shared-library ``openmc_simulation_finalize`` call.
    """
    _dll.openmc_simulation_finalize()
def source_bank():
    """Return source bank as NumPy array

    Returns
    -------
    numpy.ndarray
        Source sites

    """
    # Get pointer to source bank; ptr and n are output parameters.
    ptr = POINTER(_Bank)()
    n = c_int64()
    _dll.openmc_source_bank(ptr, n)

    # Convert to numpy array with appropriate datatype. as_array wraps the
    # library-owned buffer rather than copying it.
    bank_dtype = np.dtype(_Bank)
    return as_array(ptr, (n.value,)).view(bank_dtype)
def statepoint_write(filename=None, write_source=True):
    """Write a statepoint file.

    Parameters
    ----------
    filename : str or None
        Path to the statepoint to write. If None is passed, a default name that
        contains the current batch will be written.
    write_source : bool
        Whether or not to include the source bank in the statepoint.

    """
    if filename is not None:
        # Encode to bytes for the C char* parameter; a None filename maps
        # to a null pointer, letting the library pick the default name.
        filename = c_char_p(filename.encode())
    _dll.openmc_statepoint_write(filename, c_bool(write_source))
@contextmanager
def run_in_memory(**kwargs):
    """Provides context manager for calling OpenMC shared library functions.

    This function is intended to be used in a 'with' statement and ensures that
    OpenMC is properly initialized/finalized. At the completion of the 'with'
    block, all memory that was allocated during the block is freed. For
    example::

        with openmc.capi.run_in_memory():
            for i in range(n_iters):
                openmc.capi.reset()
                do_stuff()
                openmc.capi.run()

    Parameters
    ----------
    **kwargs
        All keyword arguments are passed to :func:`init`.

    """
    init(**kwargs)
    try:
        yield
    finally:
        # Always release library resources, even if the body raised.
        finalize()
class _DLLGlobal(object):
    """Data descriptor that exposes global variables from libopenmc.

    Reads and writes pass straight through to the shared library via
    ``ctype.in_dll``, so assignments take effect in the library state
    immediately.
    """
    def __init__(self, ctype, name):
        # ctype: ctypes type of the exported symbol; name: symbol name.
        self.ctype = ctype
        self.name = name

    def __get__(self, instance, owner):
        return self.ctype.in_dll(_dll, self.name).value

    def __set__(self, instance, value):
        self.ctype.in_dll(_dll, self.name).value = value
class _FortranObject(object):
def __repr__(self):
return "{}[{}]".format(type(self).__name__, self._index)
class _FortranObjectWithID(_FortranObject):
    def __init__(self, uid=None, new=True, index=None):
        # Creating the object has already been handled by __new__. In the
        # initializer, all we do is make sure that the object returned has an ID
        # assigned. If the array index of the object is out of bounds, an
        # OutOfBoundsError will be raised here by virtue of referencing self.id
        # (the ``id`` attribute is expected to be supplied by subclasses --
        # confirm against the rest of the module).
        self.id
|
|
import requests
import os
from functools import wraps
import inspect
import sys
import re
# Pandas became an optional dependency, but we still want to track it
try:
import pandas
_PANDAS_FOUND = True
except ImportError:
_PANDAS_FOUND = False
import csv
class AlphaVantage(object):
""" Base class where the decorators and base function for the other
classes of this python wrapper will inherit from.
"""
_ALPHA_VANTAGE_API_URL = "https://www.alphavantage.co/query?"
_ALPHA_VANTAGE_MATH_MAP = ['SMA', 'EMA', 'WMA', 'DEMA', 'TEMA', 'TRIMA',
'T3', 'KAMA', 'MAMA']
_ALPHA_VANTAGE_DIGITAL_CURRENCY_LIST = \
"https://www.alphavantage.co/digital_currency_list/"
_RAPIDAPI_URL = "https://alpha-vantage.p.rapidapi.com/query?"
def __init__(self, key=None, output_format='json',
             treat_info_as_error=True, indexing_type='date', proxy=None, rapidapi=False):
    """ Initialize the class

    Keyword Arguments:
        key: Alpha Vantage api key; falls back to the
            ALPHAVANTAGE_API_KEY environment variable when omitted
        treat_info_as_error: Treat information from the api as errors
        output_format: Either 'json', 'pandas' or 'csv'
        indexing_type: Either 'date' to use the default date string given
            by the alpha vantage api call or 'integer' if you just want an
            integer indexing on your dataframe. Only valid, when the
            output_format is 'pandas'
        proxy: Dictionary mapping protocol or protocol and hostname to
            the URL of the proxy.
        rapidapi: Boolean describing whether or not the API key is
            through the RapidAPI platform or not
    """
    resolved_key = key if key is not None else \
        os.getenv('ALPHAVANTAGE_API_KEY')
    if not resolved_key or not isinstance(resolved_key, str):
        raise ValueError('The AlphaVantage API key must be provided '
                         'either through the key parameter or '
                         'through the environment variable '
                         'ALPHAVANTAGE_API_KEY. Get a free key '
                         'from the alphavantage website: '
                         'https://www.alphavantage.co/support/#api-key')
    self.key = resolved_key
    self.rapidapi = rapidapi
    # RapidAPI authenticates via request headers instead of a query param.
    self.headers = ({'x-rapidapi-host': "alpha-vantage.p.rapidapi.com",
                     'x-rapidapi-key': resolved_key}
                    if rapidapi else {})

    self.output_format = output_format
    if self.output_format == 'pandas' and not _PANDAS_FOUND:
        raise ValueError("The pandas library was not found, therefore can "
                         "not be used as an output format, please install "
                         "manually")
    self.treat_info_as_error = treat_info_as_error
    # Not all the calls accept a data type appended at the end; functions
    # that do not need it override this attribute.
    self._append_type = True
    self.indexing_type = indexing_type
    self.proxy = proxy or {}
@classmethod
def _call_api_on_func(cls, func):
    """ Decorator for forming the api call with the arguments of the
    function, it works by taking the arguments given to the function
    and building the url to call the api on it

    The decorated function must return a tuple of
    (api_function_name, data_key, meta_data_key); the wrapper builds the
    query URL from the call arguments and performs the request.

    Keyword Arguments:
        func: The function to be decorated
    """
    # Argument Handling
    if sys.version_info[0] < 3:
        # Deprecated since version 3.0
        argspec = inspect.getargspec(func)
    else:
        argspec = inspect.getfullargspec(func)
    try:
        # Asumme most of the cases have a mixed between args and named
        # args
        positional_count = len(argspec.args) - len(argspec.defaults)
        defaults = dict(
            zip(argspec.args[positional_count:], argspec.defaults))
    except TypeError:
        # len(None) raised above: the function has either no defaults or
        # only defaults.
        if argspec.args:
            # No defaults
            positional_count = len(argspec.args)
            defaults = {}
        elif argspec.defaults:
            # Only defaults
            positional_count = 0
            defaults = argspec.defaults

    # Actual decorating
    @wraps(func)
    def _call_wrapper(self, *args, **kwargs):
        used_kwargs = kwargs.copy()
        # Get the used positional arguments given to the function
        used_kwargs.update(zip(argspec.args[positional_count:],
                               args[positional_count:]))
        # Update the dictionary to include the default parameters from the
        # function
        used_kwargs.update({k: used_kwargs.get(k, d)
                            for k, d in defaults.items()})
        # Form the base url, the original function called must return
        # the function name defined in the alpha vantage api and the data
        # key for it and for its meta data.
        function_name, data_key, meta_data_key = func(
            self, *args, **kwargs)
        base_url = AlphaVantage._RAPIDAPI_URL if self.rapidapi else AlphaVantage._ALPHA_VANTAGE_API_URL
        url = "{}function={}".format(base_url, function_name)
        # argspec.args[0] is 'self', so parameters start at index 1.
        for idx, arg_name in enumerate(argspec.args[1:]):
            try:
                arg_value = args[idx]
            except IndexError:
                arg_value = used_kwargs[arg_name]
            if 'matype' in arg_name and arg_value:
                # If the argument name has matype, we gotta map the string
                # or the integer
                arg_value = self.map_to_matype(arg_value)
            if arg_value:
                # Discard argument in the url formation if it was set to
                # None (in other words, this will call the api with its
                # internal defined parameter)
                if isinstance(arg_value, tuple) or isinstance(arg_value, list):
                    # If the argument is given as list, then we have to
                    # format it, you gotta format it nicely
                    arg_value = ','.join(arg_value)
                url = '{}&{}={}'.format(url, arg_name, arg_value)
        # Allow the output format to be json or csv (supported by
        # alphavantage api). Pandas is simply json converted.
        if 'json' in self.output_format.lower() or 'csv' in self.output_format.lower():
            oformat = self.output_format.lower()
        elif 'pandas' in self.output_format.lower():
            oformat = 'json'
        else:
            raise ValueError("Output format: {} not recognized, only json,"
                             "pandas and csv are supported".format(
                                 self.output_format.lower()))
        # RapidAPI passes the key via headers, not the query string.
        apikey_parameter = "" if self.rapidapi else "&apikey={}".format(
            self.key)
        if self._append_type:
            url = '{}{}&datatype={}'.format(url, apikey_parameter, oformat)
        else:
            url = '{}{}'.format(url, apikey_parameter)
        return self._handle_api_call(url), data_key, meta_data_key
    return _call_wrapper
@classmethod
def _output_format_sector(cls, func, override=None):
""" Decorator in charge of giving the output its right format, either
json or pandas (replacing the % for usable floats, range 0-1.0)
Keyword Arguments:
func: The function to be decorated
override: Override the internal format of the call, default None
Returns:
A decorator for the format sector api call
"""
@wraps(func)
def _format_wrapper(self, *args, **kwargs):
json_response, data_key, meta_data_key = func(
self, *args, **kwargs)
if isinstance(data_key, list):
# Replace the strings into percentage
data = {key: {k: self.percentage_to_float(v)
for k, v in json_response[key].items()} for key in data_key}
else:
data = json_response[data_key]
# TODO: Fix orientation in a better way
meta_data = json_response[meta_data_key]
# Allow to override the output parameter in the call
if override is None:
output_format = self.output_format.lower()
elif 'json' or 'pandas' in override.lower():
output_format = override.lower()
# Choose output format
if output_format == 'json':
return data, meta_data
elif output_format == 'pandas':
data_pandas = pandas.DataFrame.from_dict(data,
orient='columns')
# Rename columns to have a nicer name
col_names = [re.sub(r'\d+.', '', name).strip(' ')
for name in list(data_pandas)]
data_pandas.columns = col_names
return data_pandas, meta_data
else:
raise ValueError('Format: {} is not supported'.format(
self.output_format))
return _format_wrapper
@classmethod
def _output_format(cls, func, override=None):
""" Decorator in charge of giving the output its right format, either
json or pandas
Keyword Arguments:
func: The function to be decorated
override: Override the internal format of the call, default None
"""
@wraps(func)
def _format_wrapper(self, *args, **kwargs):
call_response, data_key, meta_data_key = func(
self, *args, **kwargs)
if 'json' in self.output_format.lower() or 'pandas' \
in self.output_format.lower():
if data_key is not None:
data = call_response[data_key]
else:
data = call_response
if meta_data_key is not None:
meta_data = call_response[meta_data_key]
else:
meta_data = None
# Allow to override the output parameter in the call
if override is None:
output_format = self.output_format.lower()
elif 'json' or 'pandas' in override.lower():
output_format = override.lower()
# Choose output format
if output_format == 'json':
if isinstance(data, list):
# If the call returns a list, then we will append them
# in the resulting data frame. If in the future
# alphavantage decides to do more with returning arrays
# this might become buggy. For now will do the trick.
if not data:
data_pandas = pandas.DataFrame()
else:
data_array = []
for val in data:
data_array.append([v for _, v in val.items()])
data_pandas = pandas.DataFrame(data_array, columns=[
k for k, _ in data[0].items()])
return data_pandas, meta_data
else:
return data, meta_data
elif output_format == 'pandas':
if isinstance(data, list):
# If the call returns a list, then we will append them
# in the resulting data frame. If in the future
# alphavantage decides to do more with returning arrays
# this might become buggy. For now will do the trick.
if not data:
data_pandas = pandas.DataFrame()
else:
data_array = []
for val in data:
data_array.append([v for _, v in val.items()])
data_pandas = pandas.DataFrame(data_array, columns=[
k for k, _ in data[0].items()])
else:
try:
data_pandas = pandas.DataFrame.from_dict(data,
orient='index',
dtype='float')
# This is for Global quotes or any other new Alpha Vantage
# data that is added.
# It will have to be updated so that we can get exactly
# The dataframes we want moving forward
except ValueError:
data = {data_key: data}
data_pandas = pandas.DataFrame.from_dict(data,
orient='index',
dtype='object')
return data_pandas, meta_data
if 'integer' in self.indexing_type:
# Set Date as an actual column so a new numerical index
# will be created, but only when specified by the user.
data_pandas.reset_index(level=0, inplace=True)
data_pandas.index.name = 'index'
else:
data_pandas.index.name = 'date'
# convert to pandas._libs.tslibs.timestamps.Timestamp
data_pandas.index = pandas.to_datetime(
data_pandas.index)
return data_pandas, meta_data
elif 'csv' in self.output_format.lower():
return call_response, None
else:
raise ValueError('Format: {} is not supported'.format(
self.output_format))
return _format_wrapper
def set_proxy(self, proxy=None):
""" Set a new proxy configuration
Keyword Arguments:
proxy: Dictionary mapping protocol or protocol and hostname to
the URL of the proxy.
"""
self.proxy = proxy or {}
def map_to_matype(self, matype):
""" Convert to the alpha vantage math type integer. It returns an
integer correspondent to the type of math to apply to a function. It
raises ValueError if an integer greater than the supported math types
is given.
Keyword Arguments:
matype: The math type of the alpha vantage api. It accepts
integers or a string representing the math type.
* 0 = Simple Moving Average (SMA),
* 1 = Exponential Moving Average (EMA),
* 2 = Weighted Moving Average (WMA),
* 3 = Double Exponential Moving Average (DEMA),
* 4 = Triple Exponential Moving Average (TEMA),
* 5 = Triangular Moving Average (TRIMA),
* 6 = T3 Moving Average,
* 7 = Kaufman Adaptive Moving Average (KAMA),
* 8 = MESA Adaptive Moving Average (MAMA)
"""
# Check if it is an integer or a string
try:
value = int(matype)
if abs(value) > len(AlphaVantage._ALPHA_VANTAGE_MATH_MAP):
raise ValueError("The value {} is not supported".format(value))
except ValueError:
value = AlphaVantage._ALPHA_VANTAGE_MATH_MAP.index(matype)
return value
def _handle_api_call(self, url):
""" Handle the return call from the api and return a data and meta_data
object. It raises a ValueError on problems
Keyword Arguments:
url: The url of the service
data_key: The key for getting the data from the jso object
meta_data_key: The key for getting the meta data information out
of the json object
"""
response = requests.get(url, proxies=self.proxy, headers=self.headers)
if 'json' in self.output_format.lower() or 'pandas' in \
self.output_format.lower():
json_response = response.json()
if not json_response:
raise ValueError(
'Error getting data from the api, no return was given.')
elif "Error Message" in json_response:
raise ValueError(json_response["Error Message"])
elif "Information" in json_response and self.treat_info_as_error:
raise ValueError(json_response["Information"])
elif "Note" in json_response and self.treat_info_as_error:
raise ValueError(json_response["Note"])
return json_response
else:
csv_response = csv.reader(response.text.splitlines())
if not csv_response:
raise ValueError(
'Error getting data from the api, no return was given.')
return csv_response
|
|
"""
Seamless communion server
Upon startup:
- Reads all comma-separated URLs in SEAMLESS_COMMUNION_INCOMING and tries to establish communion with them
- Reads the port in SEAMLESS_COMMUNION_OUTGOING and listens on that port for incoming communion attempts
- Every Seamless instance has a unique and random identifier; communion is only established once for each ID
"""
"""
Servable things:
- Checksum to buffer (very generic; make it that incref is done for tf_checksum-to-transformation-JSON)
For this, there is a buffer status API, which can return:
-2: checksum unknown
-1: buffer too large
0: buffer available remotely
1: buffer available locally
- Checksum to bufferlength
- Semantic-to-syntactic checksum
- transformation jobs
- build module jobs
Jobs are submitted by checksum. There is also a job status API, which can return
a code and a return value. The return value depends on the code:
-3: Job checksum is unknown (cache miss in the server's checksum to buffer)
None is returned.
-2: Job input checksums are unknown. None is returned.
-1: Job is not runnable. None is returned.
0: Job has exception. Exception is returned as a string
1: Job is runnable. None is returned.
2: Job is running; progress and preliminary checksum are returned
3: Job is known; job checksum is returned.
Finally, the job API has an (async) wait method, that blocks until the job updates
(final result, preliminary result, or new progress)
Submitting a job is quick. After submission, the wait method is called.
Finally, the results are retrieved, resulting in a code 0, a code 3, or
occasionally a negative code (leading to re-evaluation).
The server may allow hard cancel/clear exception of a job (by checksum).
Normally, this is only done for servers behind a supervisor front-end, where
the supervisor can do load-balancing and retries where needed.
Checksum-to-buffer requests can be forwarded to remote Seamless instances,
(servant acting as a master) but job requests are not.
Jobs may include meta-data,
containing e.g. information about required packages, memory requirements,
estimated CPU time, etc.
However, this is beyond the scope of communion.
Meta-data for a job may be stored in a provenance server.
A supervisor might accept job requests and forward them to registered
Seamless servants, based on the meta-data that it retrieves from this server.
Likewise, the job status API never returns an exception value or checksum.
A provenance server might store these exceptions based on the job checksum
and meta-data. These may be managed by a supervisor, which may decide its
"""
import time
MAX_STARTUP = 5
class CommunionError(Exception):
    """Raised when a remote communion peer reports an error response
    for a request submitted via CommunionServer.client_submit."""
    pass
import logging
logger = logging.getLogger("seamless")
def print_info(*args):
    """Log all arguments, space-joined, at INFO level on the seamless logger."""
    logger.info(" ".join(map(str, args)))

def print_warning(*args):
    """Log all arguments, space-joined, at WARNING level on the seamless logger."""
    logger.warning(" ".join(map(str, args)))

def print_debug(*args):
    """Log all arguments, space-joined, at DEBUG level on the seamless logger."""
    logger.debug(" ".join(map(str, args)))

def print_error(*args):
    """Log all arguments, space-joined, at ERROR level on the seamless logger."""
    logger.error(" ".join(map(str, args)))
def is_port_in_use(address, port): # KLUDGE: For some reason, websockets does not test this??
    """Return True if a TCP connect to (address, port) succeeds, i.e. something listens there."""
    import socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect_ex returns 0 on success instead of raising
        return sock.connect_ex((address, port)) == 0
    finally:
        sock.close()
WAIT_TIME = 1.5 # time to wait for network connections after a new manager
import os, sys, asyncio, time, functools, json, traceback, base64, websockets
from weakref import WeakSet
from .communion_client import communion_client_manager
# Incoming communion peers: comma-separated URLs (or bare port numbers,
# later resolved against localhost in CommunionServer._start) read from
# the SEAMLESS_COMMUNION_INCOMING environment variable.
incoming = []
_incoming = os.environ.get("SEAMLESS_COMMUNION_INCOMING")
if _incoming:
    for url in _incoming.split(","):
        try:
            # TODO: validate URL
            incoming.append(url)
        except TypeError:
            # NOTE(review): list.append never raises TypeError, so this
            # handler is currently unreachable; it presumably anticipates
            # the TODO validation above — confirm intended exception type.
            print_error("SEAMLESS_COMMUNION_INCOMING: invalid URL '%s'" % url)
# Outgoing communion port: read from SEAMLESS_COMMUNION_OUTGOING.
# Stays None (no listener) when unset or invalid.
outgoing = None
_outgoing = os.environ.get("SEAMLESS_COMMUNION_OUTGOING")
if _outgoing:
    try:
        outgoing = int(_outgoing)
    except ValueError:
        # BUG FIX: int() raises ValueError (not TypeError) for a malformed
        # string, and the message must show the raw env value `_outgoing`,
        # not `outgoing` (which is still None at this point).
        print_error("SEAMLESS_COMMUNION_OUTGOING: invalid port '%s'" % _outgoing)
# Address to bind the outgoing listener to; defaults to localhost.
outgoing_address = os.environ.get("SEAMLESS_COMMUNION_OUTGOING_ADDRESS")
if outgoing_address is None:
    outgoing_address = "localhost"
# Default configuration for being a master, i.e. on using other peers as a service.
# Each flag enables issuing the corresponding request type to peers.
default_master_config = {
    "buffer": True,
    "buffer_status": True,
    "buffer_length": True,
    "transformation_job": False,
    "transformation_status": False,
    "semantic_to_syntactic": True,
}

# Default configuration for being a servant, i.e. on providing services to other peers.
# Values are booleans or the string "small" (serve only buffers smaller
# than 10 000 bytes; see the buffer_status request handler).
default_servant_config = {
    "buffer": "small",  # only return small buffers (< 10 000 bytes)
    "buffer_status": "small",
    "buffer_length": True,
    "transformation_job": False,
    "transformation_status": False,
    "semantic_to_syntactic": True,
    "hard_cancel": False,  # allow others to hard cancel our jobs
    "clear_exception": False,  # allow others to clear exceptions on our jobs
}
from .communion_encode import communion_encode, communion_decode
import numpy as np
class CommunionServer:
    """Peer-to-peer communion endpoint.

    Maintains one websocket per peer and speaks a simple request/response
    protocol (encoded by communion_encode/communion_decode). Outgoing
    requests are issued via client_submit; incoming requests are served by
    _process_request_from_peer according to self.config_servant.
    """
    future = None                 # set once start() has been called
    PROTOCOL = ("seamless", "communion", "0.2.1")
    _started = False              # _start() completed
    _started_outgoing = False     # outgoing listener is up
    _to_start_incoming = None     # incoming URLs not yet connected

    def __init__(self):
        self.config_master = default_master_config.copy()
        self.config_servant = default_servant_config.copy()
        cid = os.environ.get("SEAMLESS_COMMUNION_ID")
        if cid is None:
            # NOTE(review): hash() of an int is essentially the int itself and
            # is process-dependent — unique enough in practice, but not random.
            cid = hash(int(id(self)) + int(10000*time.time()))
        self.id = cid
        self.peers = {}          # websocket -> peer config dict
        self.message_count = {}  # websocket -> last used message id
        self.futures = {}        # websocket -> {message id: Future}
        self.ready = WeakSet()

    def configure_master(self, config=None, **update):
        """Set/update which services we request from peers (master role)."""
        if self._started_outgoing and any(list(update.values())):
            # NOTE(review): print_warning only accepts *args; the file= kwarg
            # would raise TypeError if this line is reached — confirm.
            print_warning("CommunionServer has already started, added functionality will not be taken into account for existing peers", file=sys.stderr)
        if config is not None:
            for key in config:
                assert key in default_master_config, key
            self.config_master = config.copy()
        for key in update:
            assert key in default_master_config, key
        self.config_master.update(update)

    def configure_servant(self, config=None, **update):
        """Set/update which services we provide to peers (servant role).

        Must be called before start(); raises otherwise.
        """
        if self.future is not None:
            raise Exception("Cannot configure CommunionServer, it has already started")
        if config is not None:
            for key in config:
                assert key in default_servant_config, key
            self.config_servant = config.copy()
        # NOTE(review): unlike configure_master, the update keys are not
        # validated against default_servant_config here — confirm intended.
        self.config_servant.update(update)

    async def _listen_peer(self, websocket, peer_config, incoming=False):
        """Handshake with a peer, register it, then pump its messages until close."""
        all_peer_ids = [peer["id"] for peer in self.peers.values()]
        if peer_config["id"] in all_peer_ids:
            # communion is only established once per peer ID
            return
        if peer_config["protocol"] != list(self.PROTOCOL):
            print_warning("Protocol mismatch, peer '%s': %s, our protocol: %s" % (peer_config["id"], peer_config["protocol"], self.PROTOCOL))
            await websocket.send("Protocol mismatch: %s" % str(self.PROTOCOL))
            # NOTE(review): websockets' close() is a coroutine; presumably
            # this should be awaited — confirm.
            websocket.close()
            return
        else:
            await websocket.send("Protocol OK")
            protocol_message = await websocket.recv()
            if protocol_message != "Protocol OK":
                return
        print_debug("listen_peer", peer_config)
        self.peers[websocket] = peer_config
        # incoming connections start their message ids at 1000 so the two
        # sides of a connection never collide
        self.message_count[websocket] = 1000 if incoming else 0
        self.futures[websocket] = {}
        communion_client_manager.add_servant(
            websocket,
            peer_config["id"],
            config_servant=peer_config["servant"],
            config_master=self.config_master
        )
        try:
            while 1:
                message = await websocket.recv()
                asyncio.ensure_future(self._process_message_from_peer(websocket, message))
        except (websockets.exceptions.ConnectionClosed, ConnectionResetError):
            pass
        except Exception:
            print_error(traceback.format_exc())
        finally:
            # unregister the peer on any exit path
            self.peers.pop(websocket)
            self.message_count.pop(websocket)
            self.futures.pop(websocket)
            communion_client_manager.remove_servant(websocket)

    async def _connect_incoming(self, config, url, url0):
        """Dial an incoming-peer URL and listen on the resulting connection.

        url0 is the original (unprefixed) URL, used to tick off
        _to_start_incoming for startup bookkeeping.
        """
        import websockets
        def start_incoming():
            # mark this URL as attempted, whether or not it succeeded
            try:
                self._to_start_incoming.remove(url0)
            except (ValueError, AttributeError):
                pass
        try:
            ok = False
            async with websockets.connect(url) as websocket:
                await websocket.send(json.dumps(config))
                peer_config = await websocket.recv()
                peer_config = json.loads(peer_config)
                print_warning("INCOMING", self.id, peer_config["id"])
                start_incoming()
                ok = True
                await self._listen_peer(websocket, peer_config, incoming=True)
        finally:
            if not ok:
                start_incoming()

    async def _serve_outgoing(self, config, websocket, path):
        """Handler for peers that connect to our outgoing listener port."""
        peer_config = await websocket.recv()
        peer_config = json.loads(peer_config)
        print_warning("OUTGOING", self.id, peer_config["id"])
        await websocket.send(json.dumps(config))
        await self._listen_peer(websocket, peer_config)

    async def _start(self):
        """Start the outgoing listener (if configured) and dial all incoming peers."""
        if self._started:
            return
        config = {
            "protocol": self.PROTOCOL,
            "id": self.id,
            "master": self.config_master,
            "servant": self.config_servant
        }
        import websockets
        coros = []
        if outgoing is not None:
            if is_port_in_use(outgoing_address, outgoing): # KLUDGE
                print("ERROR: outgoing port %d already in use" % outgoing)
                raise Exception
            server = functools.partial(self._serve_outgoing, config)
            coro_server = websockets.serve(server, outgoing_address, outgoing)
            print("Set up a communion outgoing port %d" % outgoing)
        if len(incoming):
            for n in range(len(incoming)):
                url = incoming[n]
                try:
                    # a bare port number means localhost:<port>
                    int(url)
                    url = "localhost:" + url
                except ValueError:
                    pass
                incoming[n] = url
            self._to_start_incoming = incoming.copy()
            for url in incoming:
                url0 = url
                if not url.startswith("ws://") and not url.startswith("wss://"):
                    url = "ws://" + url
                coro = self._connect_incoming(config, url, url0)
                coros.append(coro)
        if outgoing is not None:
            await coro_server
            self._started_outgoing = True
        if len(coros):
            await asyncio.gather(*coros)
        self._started = True

    async def _startup(self):
        """Poll until the server is up (or MAX_STARTUP seconds have passed)."""
        print_debug("Communion server startup commencing")
        try:
            t = time.time()
            while 1:
                # NOTE(review): polls the module-level singleton
                # `communion_server` rather than `self`; also, when no
                # outgoing port is configured, _started_outgoing never
                # becomes True and this loop always runs into the timeout
                # — confirm.
                if communion_server._started_outgoing:
                    if communion_server._to_start_incoming is None or not len(communion_server._to_start_incoming):
                        break
                await asyncio.sleep(0.05)
                print_debug("Communion server startup waiting")
                if time.time() - t > MAX_STARTUP:
                    print_error("Communion server startup timed out")
                    break
        except:
            # NOTE(review): bare except also swallows CancelledError
            import traceback
            print_error("Communion server startup exception")
            print_error(traceback.format_exc())
        finally:
            print_info("Communion server startup complete")

    def start(self):
        """Launch _start and _startup as background tasks (idempotent)."""
        if self.future is not None:
            return
        coro = self._start()
        self.future = asyncio.ensure_future(coro)
        self.startup = asyncio.ensure_future(self._startup())

    async def _process_transformation_request(self, transformation, transformer, peer):
        """Serve a peer-submitted transformation job: fetch missing input
        buffers (locally or from the peer), incref the transformation and
        run it; any exception is recorded under the transformer's checksum."""
        try:
            tcache = transformation_cache
            remote_pins = []
            for pinname in transformation:
                if pinname.startswith("__"):
                    # special entries, not pins
                    continue
                celltype, subcelltype, sem_checksum = transformation[pinname]
                checksum2 = await tcache.serve_semantic_to_syntactic(
                    sem_checksum, celltype, subcelltype,
                    peer
                )
                checksum2 = checksum2[0]
                assert isinstance(checksum2, bytes)
                buffer = buffer_cache.get_buffer(checksum2)
                if buffer is not None:
                    continue
                # buffer not available locally; fetch from the peer later,
                # all fetches gathered concurrently below
                coro = get_buffer_remote(
                    checksum2,
                    peer
                )
                remote_pins.append((checksum2, coro))
            if len(remote_pins):
                buffers = await asyncio.gather(*[rp[1] for rp in remote_pins])
                for n in range(len(buffers)):
                    buffer = buffers[n]
                    if buffer is not None:
                        buffer_cache.cache_buffer(remote_pins[n][0], buffer)
            result = await tcache.incref_transformation(
                transformation, transformer,
                transformation_build_exception=None
            )
            if result is not None:
                tf_checksum, tf_exc, result_checksum, prelim = result
                if tf_exc is not None:
                    raise tf_exc
                if result_checksum is None or prelim:
                    job = tcache.run_job(transformation, tf_checksum)
                    if job is not None:
                        # shield: cancelling this handler must not kill the job
                        await asyncio.shield(job.future)
        except Exception as exc:
            tcache.transformation_exceptions[transformer.tf_checksum] = exc

    async def _process_request_from_peer(self, peer, message):
        """Dispatch one peer request by its "type" and always send a response.

        Any exception is reported back to the peer as an error response.
        """
        # NOTE(review): `type` shadows the builtin of the same name
        type = message["type"]
        message_id = message["id"]
        content = message["content"]
        result = None
        error = False
        try:
            if type == "transformation_hard_cancel":
                assert self.config_servant["hard_cancel"]
                checksum = bytes.fromhex(content)
                transformation_cache.hard_cancel(tf_checksum=checksum)
                result = "OK"
            elif type == "transformation_clear_exception":
                assert self.config_servant["clear_exception"]
                checksum = bytes.fromhex(content)
                transformation_cache.clear_exception(tf_checksum=checksum)
                result = "OK"
            elif type == "buffer_status":
                assert self.config_servant[type]
                checksum = bytes.fromhex(content)
                async def func():
                    # status codes: 1 = local, 0 = remote, -1 = too large,
                    # -2 = unknown (see module docstring)
                    has_buffer = buffer_cache.buffer_check(checksum)
                    buffer_length = None
                    if has_buffer:
                        buffer_length = buffer_cache.get_buffer_length(checksum)
                        print_debug("STATUS SERVE BUFFER", buffer_length, checksum.hex())
                    if buffer_length is not None:
                        if buffer_length < 10000:
                            return 1
                        status = self.config_servant["buffer_status"]
                        if status == "small":
                            return -1
                    # not available locally (or too large to serve):
                    # ask our own other peers
                    peer_id = self.peers[peer]["id"]
                    result = await communion_client_manager.remote_buffer_status(
                        checksum, peer_id
                    )
                    if result == True:
                        return 0
                    else:
                        return -2
                result = await func()
                print_info("BUFFER STATUS", checksum.hex(), result)
            elif type == "buffer":
                assert self.config_servant[type]
                checksum = bytes.fromhex(content)
                result = get_buffer(
                    checksum
                )
                if result is None:
                    # forward the request to our other peers (master role)
                    peer_id = self.peers[peer]["id"]
                    result = await get_buffer_remote(
                        checksum,
                        remote_peer_id=peer_id
                    )
                print_debug("BUFFER", checksum.hex(), result)
            elif type == "buffer_length":
                assert self.config_servant[type]
                checksum = bytes.fromhex(content)
                result = buffer_cache.get_buffer_length(checksum)
                if result is None:
                    peer_id = self.peers[peer]["id"]
                    result = await get_buffer_length_remote(
                        checksum,
                        remote_peer_id=peer_id
                    )
                print_info("BUFFERLENGTH", checksum.hex(), result)
            elif type == "semantic_to_syntactic":
                assert self.config_servant["semantic_to_syntactic"]
                checksum, celltype, subcelltype = content
                checksum = bytes.fromhex(checksum)
                peer_id = self.peers[peer]["id"]
                tcache = transformation_cache
                result = await tcache.serve_semantic_to_syntactic(
                    checksum, celltype, subcelltype,
                    peer_id
                )
                if isinstance(result, list):
                    # checksums travel as hex strings
                    result = tuple([r.hex() for r in result])
            elif type == "transformation_status":
                assert self.config_servant[type]
                checksum = bytes.fromhex(content)
                peer_id = self.peers[peer]["id"]
                tcache = transformation_cache
                result = await tcache.serve_transformation_status(
                    checksum, peer_id
                )
                if isinstance(result[-1], bytes):
                    result = (*result[:-1], result[-1].hex())
            elif type == "transformation_job":
                assert self.config_servant[type]
                checksum = bytes.fromhex(content)
                peer_id = self.peers[peer]["id"]
                transformer = RemoteTransformer(
                    checksum, peer_id
                )
                tcache = transformation_cache
                transformation = await tcache.serve_get_transformation(checksum, peer_id)
                # run the job in the background; respond immediately
                coro = self._process_transformation_request(
                    transformation, transformer, peer
                )
                asyncio.ensure_future(coro)
                result = "OK"
            elif type == "transformation_wait":
                checksum = bytes.fromhex(content)
                peer_id = self.peers[peer]["id"]
                tcache = transformation_cache
                await tcache.remote_wait(checksum, peer_id)
                result = "OK"
            elif type == "transformation_cancel":
                assert self.config_servant["transformation_job"]
                checksum = bytes.fromhex(content)
                peer_id = self.peers[peer]["id"]
                tcache = transformation_cache
                key = checksum, peer_id
                transformation = await tcache.serve_get_transformation(checksum, peer_id)
                rem_transformer = tcache.remote_transformers.get(key)
                # NOTE(review): `key` is a tuple and can never be None, so
                # this condition is always true; presumably it should test
                # `rem_transformer is not None` — confirm.
                if key is not None:
                    tcache.decref_transformation(transformation, rem_transformer)
        except Exception as exc:
            print_error(traceback.format_exc())
            error = True
            result = repr(exc)
        finally:
            print_debug("REQUEST", message_id)
            response = {
                "mode": "response",
                "id": message_id,
                "content": result
            }
            if error:
                response["error"] = True
            msg = communion_encode(response)
            assert isinstance(msg, bytes)
            try:
                peer_id = self.peers[peer]["id"]
                print_info(" Communion response: send %d bytes to peer '%s' (#%d)" % (len(msg), peer_id, response["id"]))
                print_debug(" RESPONSE:", msg, "/RESPONSE")
            except KeyError:
                # peer disconnected in the meantime; drop the response
                pass
            else:
                await peer.send(msg)

    def _process_response_from_peer(self, peer, message):
        """Resolve the local future that is awaiting this response."""
        message_id = message["id"]
        content = message["content"]
        print_debug("RESPONSE", message_id)
        future = self.futures[peer][message_id]
        if message.get("error"):
            # NOTE(review): unlike the success path, the cancelled() state is
            # not checked here; set_exception on a cancelled future raises
            # InvalidStateError — confirm.
            future.set_exception(CommunionError(content))
        else:
            if not future.cancelled():
                future.set_result(content)

    async def _process_message_from_peer(self, peer, msg):
        """Decode one wire message and route it to the request or response handler."""
        message = communion_decode(msg)
        peer_id = self.peers[peer]["id"]
        report = " Communion %s: receive %d bytes from peer '%s' (#%d)"
        print_info(report % (message["mode"], len(msg), peer_id, message["id"]), message.get("type"))
        print_debug("message from peer", self.peers[peer]["id"], ": ", message)
        mode = message["mode"]
        assert mode in ("request", "response"), mode
        if mode == "request":
            return await self._process_request_from_peer(peer, message)
        else:
            return self._process_response_from_peer(peer, message)

    async def client_submit(self, message, peer):
        """Send a request to a peer and await its response (master role).

        Returns the response content; raises CommunionError if the peer
        reported an error.
        """
        assert peer in self.peers, (peer, self.peers.keys())
        message_id = self.message_count[peer] + 1
        self.message_count[peer] = message_id
        future = asyncio.Future()
        self.futures[peer][message_id] = future
        message = message.copy()
        message.update({
            "mode": "request",
            "id": message_id,
        })
        msg = communion_encode(message)
        peer_id = self.peers[peer]["id"]
        print_info(" Communion request: send %d bytes to peer '%s' (#%d)" % (len(msg), peer_id, message["id"]), message["type"])
        await peer.send(msg)
        result = await future
        self.futures[peer].pop(message_id)
        return result
communion_server = CommunionServer()
from .core.cache.transformation_cache import transformation_cache, RemoteTransformer
from .core.cache.buffer_cache import buffer_cache
from .core.protocol.get_buffer import get_buffer, get_buffer_remote, get_buffer_length_remote
|
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Windbarb Visual and shader definitions.
"""
import numpy as np
from vispy.color import ColorArray
from vispy.gloo import VertexBuffer
from vispy.visuals.shaders import Variable
from vispy.visuals.visual import Visual
vert = """
uniform float u_antialias;
uniform float u_px_scale;
uniform float u_scale;
attribute vec3 a_position;
attribute vec2 a_wind;
attribute vec4 a_fg_color;
attribute vec4 a_bg_color;
attribute float a_edgewidth;
attribute float a_size;
attribute float a_trig;
varying vec4 v_fg_color;
varying vec4 v_bg_color;
varying vec2 v_wind;
varying float v_trig;
varying float v_edgewidth;
varying float v_antialias;
void main (void) {
$v_size = a_size * u_px_scale * u_scale;
v_edgewidth = a_edgewidth * float(u_px_scale);
v_wind = a_wind.xy;
v_trig = a_trig;
v_antialias = u_antialias;
v_fg_color = a_fg_color;
v_bg_color = a_bg_color;
gl_Position = $transform(vec4(a_position,1.0));
float edgewidth = max(v_edgewidth, 1.0);
gl_PointSize = ($v_size) + 4.*(edgewidth + 1.5*v_antialias);
}
"""
frag = """
#include "math/constants.glsl"
#include "math/signed-segment-distance.glsl"
#include "antialias/antialias.glsl"
varying vec4 v_fg_color;
varying vec4 v_bg_color;
varying vec2 v_wind;
varying float v_trig;
varying float v_edgewidth;
varying float v_antialias;
// SDF-Triangle by @rougier
// https://github.com/rougier/python-opengl/blob/master/code/chapter-06/SDF-triangle.py
float sdf_triangle(vec2 p, vec2 p0, vec2 p1, vec2 p2)
{
vec2 e0 = p1 - p0;
vec2 e1 = p2 - p1;
vec2 e2 = p0 - p2;
vec2 v0 = p - p0;
vec2 v1 = p - p1;
vec2 v2 = p - p2;
vec2 pq0 = v0 - e0*clamp( dot(v0,e0)/dot(e0,e0), 0.0, 1.0 );
vec2 pq1 = v1 - e1*clamp( dot(v1,e1)/dot(e1,e1), 0.0, 1.0 );
vec2 pq2 = v2 - e2*clamp( dot(v2,e2)/dot(e2,e2), 0.0, 1.0 );
float s = sign( e0.x*e2.y - e0.y*e2.x );
vec2 d = min( min( vec2( dot( pq0, pq0 ), s*(v0.x*e0.y-v0.y*e0.x) ),
vec2( dot( pq1, pq1 ), s*(v1.x*e1.y-v1.y*e1.x) )),
vec2( dot( pq2, pq2 ), s*(v2.x*e2.y-v2.y*e2.x) ));
return -sqrt(d.x)*sign(d.y);
}
void main()
{
// Discard plotting marker body and edge if zero-size
if ($v_size <= 0.)
discard;
float edgewidth = max(v_edgewidth, 1.0);
float linewidth = max(v_edgewidth, 1.0);
float edgealphafactor = min(v_edgewidth, 1.0);
float size = $v_size + 4.*(edgewidth + 1.5*v_antialias);
// factor 6 for acute edge angles that need room as for star marker
vec2 wind = v_wind;
if (v_trig > 0.)
{
float u = wind.x * cos(radians(wind.y));
float v = wind.x * sin(radians(wind.y));
wind = vec2(u, v);
}
// knots to m/s
wind *= 2.;
// normalized distance
float dx = 0.5;
// normalized center point
vec2 O = vec2(dx);
// normalized x-component
vec2 X = normalize(wind) * dx / M_SQRT2 / 1.1 * vec2(1, -1);
// normalized y-component
// here the barb can be mirrored for southern earth * (vec2(1., -1.)
//vec2 Y = X.yx * vec2(1., -1.); // southern hemisphere
vec2 Y = X.yx * vec2(-1., 1.); // northern hemisphere
// PointCoordinate
vec2 P = gl_PointCoord;
// calculate barb items
float speed = length(wind);
int flag = int(floor(speed / 50.));
speed -= float (50 * flag);
int longbarb = int(floor(speed / 10.));
speed -= float (longbarb * 10);
int shortbarb = int(floor(speed / 5.));
int calm = shortbarb + longbarb + flag;
// starting distance
float r;
// calm, plot circles
if (calm == 0)
{
r = abs(length(O-P)- dx * 0.2);
r = min(r, abs(length(O-P)- dx * 0.1));
}
else
{
// plot shaft
r = segment_distance(P, O, O-X);
float pos = 1.;
// plot flag(s)
while(flag >= 1)
{
r = min(r, sdf_triangle(P, O-X*pos, O-X*pos-X*.4-Y*.4, O-X*pos-X*.4));
flag -= 1;
pos -= 0.15;
}
// plot longbarb(s)
while(longbarb >= 1)
{
r = min(r, segment_distance(P, O-X*pos, O-X*pos-X*.4-Y*.4));
longbarb -= 1;
pos -= 0.15;
}
// plot shortbarb
while(shortbarb >= 1)
{
if (pos == 1.0)
pos -= 0.15;
r = min(r, segment_distance(P, O-X*pos, O-X*pos-X*.2-Y*.2));
shortbarb -= 1;
pos -= 0.15;
}
}
// apply correction for size
r *= size;
vec4 edgecolor = vec4(v_fg_color.rgb, edgealphafactor*v_fg_color.a);
if (r > 0.5 * v_edgewidth + v_antialias)
{
// out of the marker (beyond the outer edge of the edge
// including transition zone due to antialiasing)
discard;
}
gl_FragColor = filled(r, edgewidth, v_antialias, edgecolor);
}
"""
class WindbarbVisual(Visual):
    """ Visual displaying windbarbs.

    Each windbarb is drawn as a GL point sprite; the fragment shader
    (see `frag`) renders shaft, barbs and flags from the wind vector.
    """

    def __init__(self, **kwargs):
        """Create the visual; any keyword arguments are forwarded to set_data()."""
        self._vbo = VertexBuffer()
        self._v_size_var = Variable('varying float v_size')
        self._marker_fun = None
        self._data = None
        Visual.__init__(self, vcode=vert, fcode=frag)
        self.shared_program.vert['v_size'] = self._v_size_var
        self.shared_program.frag['v_size'] = self._v_size_var
        self.set_gl_state(depth_test=True, blend=True,
                          blend_func=('src_alpha', 'one_minus_src_alpha'))
        self._draw_mode = 'points'
        if len(kwargs) > 0:
            self.set_data(**kwargs)
        self.freeze()

    def set_data(self, pos=None, wind=None, trig=True, size=50.,
                 antialias=1., edge_width=1., edge_color='black',
                 face_color='white'):
        """ Set the data used to display this visual.

        Parameters
        ----------
        pos : array
            The array of locations to display each windbarb.
        wind : array
            The array of wind vector components to display each windbarb.
            in m/s. For knots divide by two.
        trig : bool
            True - wind contains (mag, ang)
            False - wind contains (u, v)
            defaults to True
        size : float or array
            The windbarb size in px.
        antialias : float
            The antialiased area (in pixels).
        edge_width : float | None
            The width of the windbarb outline in pixels.
        edge_color : Color | ColorArray
            The color used to draw each symbol outline.
        face_color : Color | ColorArray
            The color used to draw each symbol interior.
        """
        assert (isinstance(pos, np.ndarray) and
                pos.ndim == 2 and pos.shape[1] in (2, 3))
        # BUG FIX: this assertion previously re-checked `pos` instead of
        # `wind` (copy-paste), so malformed wind arrays slipped through.
        assert (isinstance(wind, np.ndarray) and
                wind.ndim == 2 and wind.shape[1] == 2)
        if edge_width < 0:
            raise ValueError('edge_width cannot be negative')
        # since the windbarb starts in the fragment center,
        # we need to multiply by 2 for correct length
        size *= 2
        edge_color = ColorArray(edge_color).rgba
        if len(edge_color) == 1:
            edge_color = edge_color[0]
        face_color = ColorArray(face_color).rgba
        if len(face_color) == 1:
            face_color = face_color[0]
        n = len(pos)
        # BUG FIX: 'a_trig' used a zero-length field shape
        # (np.float32, 0), which stores no data, so the trig flag could
        # never reach the shader.
        data = np.zeros(n, dtype=[('a_position', np.float32, 3),
                                  ('a_wind', np.float32, 2),
                                  ('a_trig', np.float32),
                                  ('a_fg_color', np.float32, 4),
                                  ('a_bg_color', np.float32, 4),
                                  ('a_size', np.float32),
                                  ('a_edgewidth', np.float32)])
        data['a_fg_color'] = edge_color
        data['a_bg_color'] = face_color
        data['a_edgewidth'] = edge_width
        data['a_position'][:, :pos.shape[1]] = pos
        data['a_wind'][:, :wind.shape[1]] = wind
        # the shader only distinguishes zero / non-zero
        if trig:
            data['a_trig'] = 1.
        else:
            data['a_trig'] = 0.
        data['a_size'] = size
        self.shared_program['u_antialias'] = antialias
        self._data = data
        self._vbo.set_data(data)
        self.shared_program.bind(self._vbo)
        self.update()

    def _prepare_transforms(self, view):
        """Attach the view's coordinate transform to the vertex shader."""
        xform = view.transforms.get_transform()
        view.view_program.vert['transform'] = xform

    def _prepare_draw(self, view):
        """Upload per-draw uniforms (pixel scale, global scale)."""
        view.view_program['u_px_scale'] = view.transforms.pixel_scale
        view.view_program['u_scale'] = 1

    def _compute_bounds(self, axis, view):
        """Return (min, max) of the positions along *axis*, or None if no data."""
        # BUG FIX: check _data before indexing it; the original indexed
        # self._data first, raising TypeError when no data was set yet.
        if self._data is None:
            return None
        pos = self._data['a_position']
        if pos.shape[1] > axis:
            return (pos[:, axis].min(), pos[:, axis].max())
        else:
            return (0, 0)
|
|
# Copyright (c) 2016, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe, re, frappe.utils
from frappe.desk.notifications import get_notifications
from frappe import _
class BotParser(object):
	'''Base class for bot parsers.

	Subclasses implement get_reply() and use these helpers to inspect
	the user's query and the identified doctypes.'''
	def __init__(self, reply, query):
		self.query = query
		self.reply = reply
		self.tables = reply.tables
		self.doctype_names = reply.doctype_names

	def has(self, *words):
		'''return True if any of the words is present in the query'''
		# BUG FIX (minor): return an explicit bool instead of True/None.
		# NOTE: words are interpolated unescaped into the regex — callers
		# are expected to pass plain words, not regex metacharacters.
		return any(re.search(r'\b{0}\b'.format(word), self.query)
			for word in words)

	def startswith(self, *words):
		'''return True if the query starts with any of the given words'''
		# str.startswith accepts a tuple of prefixes directly
		return self.query.startswith(words)

	def strip_words(self, query, *words):
		'''Remove the given (whole) words from the query'''
		for word in words:
			query = re.sub(r'\b{0}\b'.format(word), '', query)
		return query.strip()

	def format_list(self, data):
		'''Format list of documents as markdown links, comma-separated'''
		return ', '.join(['[{name}](#Form/{doctype}/{name})'.format(doctype=self.get_doctype(),
			name=d.name) for d in data])

	def get_doctype(self):
		'''returns the doctype name from self.tables'''
		return self.doctype_names[self.tables[0]]
class ShowNotificationBot(BotParser):
	'''Replies with the user's open notification counts.'''
	def get_reply(self):
		if not self.has("whatsup", "what's up", "wassup", "whats up"):
			return None
		counts = get_notifications().get('open_count_doctype')
		pending = sorted(counts.items())
		if not pending:
			return 'Take it easy, nothing urgent needs your attention'
		# one "<count> [<DocType>](#List/<DocType>)" line per doctype with work
		lines = ["{0} [{1}](#List/{1})".format(count, doctype)
			for doctype, count in pending if count > 0]
		return ("Following items need your attention:\n\n"
			+ "\n\n".join(lines))
class GetOpenListBot(BotParser):
	'''Get list of open items'''
	def get_reply(self):
		# Trigger on queries like "open issues" / "show open sales orders".
		if self.startswith('open', 'show open', 'list open', 'get open'):
			if self.tables:
				doctype = self.get_doctype()
				from frappe.desk.notifications import get_notification_config
				# "Open" filters come from the notification config: either a
				# filters dict, or a dotted path to a method returning rows.
				filters = get_notification_config().get('for_doctype').get(doctype, None)
				if filters:
					if isinstance(filters, dict):
						data = frappe.get_list(doctype, filters=filters)
					else:
						# method path: call it and adapt (name, title) rows
						data = [{'name':d[0], 'title':d[1]} for d in frappe.get_attr(filters)(as_list=True)]
					return ", ".join('[{title}](#Form/{doctype}/{name})'.format(doctype=doctype,
						name=d.get('name'), title=d.get('title') or d.get('name')) for d in data)
				else:
					# doctype has no notion of "open" in the notification config
					return _("Can't identify open {0}. Try something else.").format(doctype)
class ListBot(BotParser):
	'''Lists documents of the doctype named in the query.'''
	def get_reply(self):
		if not self.startswith('list', 'show'):
			return None
		# drop the leading verb, then look for a doctype in the remainder
		remainder = self.query.split(None, 1)[1]
		self.tables = self.reply.identify_tables(remainder)
		if self.tables:
			return self.format_list(frappe.get_list(self.get_doctype()))
class CountBot(BotParser):
	'''Answers "how many <doctype>" queries with a record count.'''
	def get_reply(self):
		if not self.startswith('how many'):
			return None
		remainder = self.query.split(None, 1)[1]
		self.tables = self.reply.identify_tables(remainder)
		if not self.tables:
			return None
		# doctype comes from the known-doctype map, so it is safe to embed
		rows = frappe.db.sql('select count(*) from `tab{0}`'.format(self.get_doctype()))
		return str(rows[0][0])
class FindBot(BotParser):
	'''Searches a doctype by name/title, e.g. "find asian in sales orders".'''
	def get_reply(self):
		if not self.startswith('find', 'search'):
			return None
		query = self.query.split(None, 1)[1]
		text, table = None, None
		# Split "find <text> from|in <table>" on the first separator found.
		# maxsplit=1 avoids a ValueError when the separator occurs twice;
		# elif avoids the second split clobbering the first.
		if self.has('from'):
			text, table = query.split('from', 1)
		elif self.has('in'):
			text, table = query.split('in', 1)
		if not table:
			# neither separator present: show usage help (was assigned to the
			# never-read self.out, so the hint was silently dropped)
			return _("You can find things by asking 'find orange in customers'")
		text = text.strip()
		self.tables = self.reply.identify_tables(table.strip())
		if not self.tables:
			return _("Could not identify {0}").format(table)
		filters = {'name': ('like', '%{0}%'.format(text))}
		or_filters = None
		title_field = frappe.get_meta(self.get_doctype()).title_field
		if title_field and title_field != 'name':
			# also match the human-readable title; filter on the actual
			# title_field (previously the literal key 'title' was used)
			or_filters = {title_field: ('like', '%{0}%'.format(text))}
		data = frappe.get_list(self.get_doctype(),
			filters=filters, or_filters=or_filters)
		if data:
			return self.format_list(data)
		return _("Could not find {0} in {1}").format(text, self.get_doctype())
class BotReply(object):
	'''Build a reply for the bot by calling all parsers'''
	def __init__(self):
		self.tables = []

	def get_reply(self, query):
		'''Return a markdown reply for *query*, or a generic fallback.'''
		self.query = query.lower()
		self.setup()
		self.pre_process()
		if not self.query.split():
			# empty (or punctuation-only) query: nothing to parse
			# (previously raised IndexError on self.query.split()[0])
			return _("Don't know, ask 'help'")

		# basic replies that need no parser
		if self.query.split()[0] in ("hello", "hi"):
			return _("Hello {0}").format(frappe.utils.get_fullname())
		if self.query == "help":
			return help_text.format(frappe.utils.get_fullname())

		# build using parsers registered through the `bot_parsers` hook
		replies = []
		for parser in frappe.get_hooks('bot_parsers'):
			reply = None
			try:
				reply = frappe.get_attr(parser)(self, query).get_reply()
			except frappe.PermissionError:
				# a parser may touch data the user is not allowed to read
				reply = _("Oops, you are not allowed to know that")
			if reply:
				replies.append(reply)
		if replies:
			return '\n\n'.join(replies)
		# fix: `reply` was referenced here and is unbound when no parsers
		# are registered; the fallback needs no condition at all
		return _("Don't know, ask 'help'")

	def setup(self):
		self.setup_tables()
		self.identify_tables()

	def pre_process(self):
		# normalize the query: drop a trailing "?" and expand shorthands
		if self.query.endswith("?"):
			self.query = self.query[:-1]
		if self.query in ("todo", "to do"):
			self.query = "open todo"

	def setup_tables(self):
		# cache all non-child DocType names, keyed by their lower-case form
		tables = frappe.get_all("DocType", {"is_table": 0})
		self.all_tables = [d.name.lower() for d in tables]
		self.doctype_names = {d.name.lower(): d.name for d in tables}

	def identify_tables(self, query=None):
		'''Collect doctypes mentioned in *query* (defaults to the full query).'''
		if not query:
			query = self.query
		self.tables = []
		for t in self.all_tables:
			# t[:-1] also matches the singular form of a plural mention
			if t in query or t[:-1] in query:
				self.tables.append(t)
		return self.tables
# Reply template for the "help" query; {0} is filled with the user's full name.
help_text = """Hello {0}, I am a K.I.S.S Bot, not AI, so be kind. I can try answering a few questions like,
- "todo": list my todos
- "show customers": list customers
- "locate shirt": find where to find item "shirt"
- "open issues": find open issues, try "open sales orders"
- "how many users": count number of users
- "find asian in sales orders": find sales orders where name or title has "asian"
have fun!
"""
|
|
import os
import numpy as np
import tensorflow as tf
from layer_factory import layer_factory
from tensorflow.python.framework import tensor_shape
class vae_wo_skipconn:
    """Conditional VAE for colorization, without encoder-decoder skip
    connections.

    The encoder maps a flat color field to the mean/log-variance of a latent
    gaussian; the decoder reconstructs the color field from a latent sample,
    conditioned on the grey-level image.  Built on the TF1 graph API
    (placeholders/variable scopes).
    """

    def __init__(self, flags, nch=2, condinference_flag=False):
        # flags: experiment config (batch_size, img_height/width, hidden_size,
        # pc_dir, pc_comp, lr_vae, ...)
        self.flags = flags
        # nch: number of output color channels (2 for chrominance)
        self.nch = nch
        self.layer_factory = layer_factory()
        # when True, inference() decodes directly from a fed latent vector
        # instead of re-encoding the input image
        self.condinference_flag = condinference_flag

    def inputs(self):
        """Return handles to the graph's input placeholders."""
        inp_img = tf.placeholder(tf.float32, [self.flags.batch_size, \
            self.nch * self.flags.img_height * self.flags.img_width])
        inp_greylevel = tf.placeholder(tf.float32, [self.flags.batch_size, \
            self.flags.img_height * self.flags.img_width])
        inp_latent = tf.placeholder(tf.float32, [self.flags.batch_size, \
            self.flags.hidden_size])
        is_training = tf.placeholder(tf.bool)
        keep_prob = tf.placeholder(tf.float32)
        kl_weight = tf.placeholder(tf.float32)
        # per-pixel weights applied inside the reconstruction loss
        lossweights = tf.placeholder(tf.float32, [self.flags.batch_size, \
            self.nch * self.flags.img_height * self.flags.img_width])
        return inp_img, inp_greylevel, inp_latent, is_training, keep_prob, \
            kl_weight, lossweights

    def inference(self, inp_img, inp_greylevel, inp_latent, is_training, keep_prob):
        """Build the train/test inference graphs and return their outputs.

        Returns (mean_train, stddev_train, output_train, mean_test,
        stddev_test, output_test, output_condinference); the test-side values
        are None in conditional-inference mode and vice versa.
        """
        with tf.variable_scope('Inference', reuse=False) as sc:
            z1_train = self.__encoder(sc, inp_img, is_training, keep_prob, \
                in_nch=self.nch, reuse=False)
            epsilon_train = tf.truncated_normal([self.flags.batch_size, self.flags.hidden_size])
            # encoder output packs [mean, log-variance]
            mean_train = z1_train[:, :self.flags.hidden_size]
            stddev_train = tf.sqrt(tf.exp(z1_train[:, self.flags.hidden_size:]))
            # reparameterization trick
            z1_sample = mean_train + epsilon_train * stddev_train
            output_train = self.__decoder(sc, is_training, inp_greylevel, z1_sample, reuse=False)
        with tf.variable_scope('Inference', reuse=True) as sc:
            if(self.condinference_flag == False):
                z1_test = self.__encoder(sc, inp_img, is_training, keep_prob, \
                    in_nch=self.nch, reuse=True)
                epsilon_test = tf.truncated_normal([self.flags.batch_size, self.flags.hidden_size])
                mean_test = z1_test[:, :self.flags.hidden_size]
                stddev_test = tf.sqrt(tf.exp(z1_test[:, self.flags.hidden_size:]))
                z1_sample = mean_test + epsilon_test * stddev_test
                # Fix the encoder.  tf.stop_gradient returns a new tensor; the
                # original discarded it, so gradients still flowed through.
                z1_sample = tf.stop_gradient(z1_sample)
                output_test = self.__decoder(sc, is_training, inp_greylevel, z1_sample, reuse=True)
                output_condinference = None
            else:
                mean_test = None
                stddev_test = None
                output_test = None
                # decode directly from the externally supplied latent code
                output_condinference = self.__decoder(sc, is_training, inp_greylevel, inp_latent,\
                    reuse=True)
        return mean_train, stddev_train, output_train, mean_test, stddev_test, \
            output_test, output_condinference

    def loss(self, target_tensor, op_tensor, mean, stddev, kl_weight, lossweights, epsilon=1e-6, \
            is_regression=True):
        """Compute the VAE loss: kl_weight * KL + reconstruction terms.

        The reconstruction term is a weighted chi-like pixel error plus a
        variance-weighted error on the top-k principal-component projections,
        the residual error outside those components and, for 2-channel
        outputs, a gradient (edge) error.  Summaries are recorded for each
        component.
        """
        # KL(q(z|x) || N(0, I)); epsilon keeps the log argument positive
        kl_loss = tf.reduce_sum(0.5 * (tf.square(mean) + tf.square(stddev) \
            - tf.log(tf.maximum(tf.square(stddev), epsilon)) - 1.0))
        recon_loss_chi = tf.reduce_mean(tf.sqrt(tf.reduce_sum( \
            lossweights*tf.square(target_tensor-op_tensor), 1)), 0)
        # Load principal components and their inverse explained variances
        np_pcvec = np.transpose(np.load(os.path.join(self.flags.pc_dir, 'components.mat.npy')))
        np_pcvar = 1./np.load(os.path.join(self.flags.pc_dir, 'exp_variance.mat.npy'))
        np_pcvec = np_pcvec[:, :self.flags.pc_comp]
        np_pcvar = np_pcvar[:self.flags.pc_comp]
        pcvec = tf.constant(np_pcvec)
        pcvar = tf.constant(np_pcvar)
        # Variance-weighted error between PC projections of output and target
        projmat_op = tf.matmul(op_tensor, pcvec)
        projmat_target = tf.matmul(target_tensor, pcvec)
        weightmat = tf.tile(tf.reshape(pcvar, [1, self.flags.pc_comp]), [self.flags.batch_size, 1])
        loss_topk_pc = tf.reduce_mean(tf.reduce_sum(\
            tf.multiply(tf.square(projmat_op-projmat_target), weightmat), 1), 0)
        # Residual after removing the top-k PC components from both tensors
        res_op = op_tensor
        res_target = target_tensor
        for npc in range(self.flags.pc_comp):
            pcvec_curr = tf.tile(tf.reshape(tf.transpose(pcvec[:, npc]), [1, -1]), \
                [self.flags.batch_size, 1])
            projop_curr = tf.tile(tf.reshape(projmat_op[:, npc], [self.flags.batch_size, 1]), \
                [1, self.nch * self.flags.img_height * self.flags.img_width])
            projtarget_curr = tf.tile(tf.reshape(projmat_target[:, npc], [self.flags.batch_size, 1]), \
                [1, self.nch * self.flags.img_height * self.flags.img_width])
            res_op = tf.subtract(res_op, tf.multiply(projop_curr, pcvec_curr))
            res_target = tf.subtract(res_target, tf.multiply(projtarget_curr, pcvec_curr))
        res_error = tf.reduce_sum(tf.square(res_op-res_target), 1)
        # the residual is weighted like the last retained component
        res_error_weight = tf.tile(tf.reshape(pcvar[self.flags.pc_comp-1], [1, 1]), [self.flags.batch_size, 1])
        loss_res_pc = tf.reduce_mean(tf.multiply(\
            tf.reshape(res_error, [self.flags.batch_size, 1]), res_error_weight))
        recon_loss = recon_loss_chi + (1e-1)*(loss_topk_pc + loss_res_pc)
        if(self.nch == 2):
            # Edge/gradient loss on the 2-D color field
            target_tensor2d = tf.reshape(target_tensor, [self.flags.batch_size, \
                self.flags.img_height, self.flags.img_width, self.nch])
            op_tensor2d = tf.reshape(op_tensor, [self.flags.batch_size, \
                self.flags.img_height, self.flags.img_width, self.nch])
            [n,w,h,c] = target_tensor2d.get_shape().as_list()
            dv = tf.square((target_tensor2d[:,1:,:h-1,:] - target_tensor2d[:,:w-1,:h-1,:])
                - (op_tensor2d[:,1:,:h-1,:] - op_tensor2d[:,:w-1,:h-1,:]))
            dh = tf.square((target_tensor2d[:,:w-1,1:,:] - target_tensor2d[:,:w-1,:h-1,:])
                - (op_tensor2d[:,:w-1,1:,:] - op_tensor2d[:,:w-1,:h-1,:]))
            grad_loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(dv+dh,[1,2,3])))
            recon_loss = recon_loss + (1e-3)*grad_loss
            # fix: grad_loss only exists in this branch; the summary used to
            # be recorded unconditionally and raised NameError when nch != 2
            tf.summary.scalar('grad_loss', grad_loss)
        loss = kl_weight*kl_loss + recon_loss
        tf.summary.scalar('kl_loss', kl_loss)
        tf.summary.scalar('recon_loss_chi', recon_loss_chi)
        tf.summary.scalar('recon_loss', recon_loss)
        return loss

    def optimize(self, loss, epsilon):
        """Return an Adam train step minimizing *loss*."""
        train_step = tf.train.AdamOptimizer(self.flags.lr_vae, epsilon=epsilon).minimize(loss)
        return train_step

    def __encoder(self, scope, input_tensor, bn_is_training, keep_prob, in_nch=1, reuse=False):
        """Encode a flat color field into [mean, log-variance]
        (shape [batch, 2*hidden_size])."""
        lf = self.layer_factory
        input_tensor2d = tf.reshape(input_tensor, [self.flags.batch_size, \
            self.flags.img_height, self.flags.img_width, in_nch])
        if(self.nch == 1 and reuse==False):
            # fix: tf.image_summary was removed from TF; use the tf.summary
            # API already used elsewhere in this class
            tf.summary.image('summ_input_tensor2d', input_tensor2d, max_outputs=10)
        nch = tensor_shape.as_dimension(input_tensor2d.get_shape()[3]).value
        if(reuse==False):
            W_conv1 = lf.weight_variable(name='W_conv1', shape=[5, 5, nch, 128])
            W_conv2 = lf.weight_variable(name='W_conv2', shape=[5, 5, 128, 256])
            W_conv3 = lf.weight_variable(name='W_conv3', shape=[5, 5, 256, 512])
            W_conv4 = lf.weight_variable(name='W_conv4', shape=[4, 4, 512, 1024])
            W_fc1 = lf.weight_variable(name='W_fc1', shape=[4*4*1024, self.flags.hidden_size * 2])
            b_conv1 = lf.bias_variable(name='b_conv1', shape=[128])
            b_conv2 = lf.bias_variable(name='b_conv2', shape=[256])
            b_conv3 = lf.bias_variable(name='b_conv3', shape=[512])
            b_conv4 = lf.bias_variable(name='b_conv4', shape=[1024])
            b_fc1 = lf.bias_variable(name='b_fc1', shape=[self.flags.hidden_size * 2])
        else:
            # fetch the variables created on the first (reuse=False) pass
            W_conv1 = lf.weight_variable(name='W_conv1')
            W_conv2 = lf.weight_variable(name='W_conv2')
            W_conv3 = lf.weight_variable(name='W_conv3')
            W_conv4 = lf.weight_variable(name='W_conv4')
            W_fc1 = lf.weight_variable(name='W_fc1')
            b_conv1 = lf.bias_variable(name='b_conv1')
            b_conv2 = lf.bias_variable(name='b_conv2')
            b_conv3 = lf.bias_variable(name='b_conv3')
            b_conv4 = lf.bias_variable(name='b_conv4')
            b_fc1 = lf.bias_variable(name='b_fc1')
        # four stride-2 conv + batch-norm stages halve the spatial size each
        conv1 = tf.nn.relu(lf.conv2d(input_tensor2d, W_conv1, stride=2) + b_conv1)
        conv1_norm = lf.batch_norm_aiuiuc_wrapper(conv1, bn_is_training, \
            'BN1', reuse_vars=reuse)
        conv2 = tf.nn.relu(lf.conv2d(conv1_norm, W_conv2, stride=2) + b_conv2)
        conv2_norm = lf.batch_norm_aiuiuc_wrapper(conv2, bn_is_training, \
            'BN2', reuse_vars=reuse)
        conv3 = tf.nn.relu(lf.conv2d(conv2_norm, W_conv3, stride=2) + b_conv3)
        conv3_norm = lf.batch_norm_aiuiuc_wrapper(conv3, bn_is_training, \
            'BN3', reuse_vars=reuse)
        conv4 = tf.nn.relu(lf.conv2d(conv3_norm, W_conv4, stride=2) + b_conv4)
        conv4_norm = lf.batch_norm_aiuiuc_wrapper(conv4, bn_is_training, \
            'BN4', reuse_vars=reuse)
        dropout1 = tf.nn.dropout(conv4_norm, keep_prob)
        # flatten the 4x4x1024 feature map and project to the latent stats
        flatten1 = tf.reshape(dropout1, [-1, 4*4*1024])
        fc1 = tf.matmul(flatten1, W_fc1)+b_fc1
        return fc1

    def __decoder(self, scope, bn_is_training, inp_greylevel, z1_sample, reuse=False):
        """Decode a latent sample into a flat color field in [-1, 1],
        conditioned on gradients of the grey-level image."""
        lf = self.layer_factory
        if(reuse == False):
            W_deconv1 = lf.weight_variable(name='W_deconv1', shape=[4, 4, self.flags.hidden_size, 1024])
            W_deconv2 = lf.weight_variable(name='W_deconv2', shape=[5, 5, 1024, 512])
            # 514 = 512 feature channels + 2 grey-gradient channels (dv, dh)
            W_deconv3 = lf.weight_variable(name='W_deconv3', shape=[5, 5, 514, 256])
            # 258 = 256 feature channels + 2 grey-gradient channels
            W_deconv4 = lf.weight_variable(name='W_deconv4', shape=[5, 5, 258, 128])
            W_deconv5 = lf.weight_variable(name='W_deconv5', shape=[5, 5, 128, self.nch])
            b_deconv1 = lf.bias_variable(name='b_deconv1', shape=[1024])
            b_deconv2 = lf.bias_variable(name='b_deconv2', shape=[512])
            b_deconv3 = lf.bias_variable(name='b_deconv3', shape=[256])
            b_deconv4 = lf.bias_variable(name='b_deconv4', shape=[128])
            b_deconv5 = lf.bias_variable(name='b_deconv5', shape=[self.nch])
        else:
            W_deconv1 = lf.weight_variable(name='W_deconv1')
            W_deconv2 = lf.weight_variable(name='W_deconv2')
            W_deconv3 = lf.weight_variable(name='W_deconv3')
            W_deconv4 = lf.weight_variable(name='W_deconv4')
            W_deconv5 = lf.weight_variable(name='W_deconv5')
            b_deconv1 = lf.bias_variable(name='b_deconv1')
            b_deconv2 = lf.bias_variable(name='b_deconv2')
            b_deconv3 = lf.bias_variable(name='b_deconv3')
            b_deconv4 = lf.bias_variable(name='b_deconv4')
            b_deconv5 = lf.bias_variable(name='b_deconv5')
        inp_greylevel2d = tf.reshape(inp_greylevel, [self.flags.batch_size, \
            self.flags.img_height, self.flags.img_width, 1])
        # treat the latent vector as a 1x1 feature map, then upsample+conv
        input_concat2d = tf.reshape(z1_sample, [self.flags.batch_size, 1, 1, self.flags.hidden_size])
        deconv1_upsamp = tf.image.resize_images(input_concat2d, [4, 4])
        deconv1 = tf.nn.relu(lf.conv2d(deconv1_upsamp, W_deconv1, stride=1) + b_deconv1)
        deconv1_norm = lf.batch_norm_aiuiuc_wrapper(deconv1, bn_is_training, \
            'BN_deconv1', reuse_vars=reuse)
        deconv2_upsamp = tf.image.resize_images(deconv1_norm, [8, 8])
        deconv2 = tf.nn.relu(lf.conv2d(deconv2_upsamp, W_deconv2, stride=1) + b_deconv2)
        deconv2_norm = lf.batch_norm_aiuiuc_wrapper(deconv2, bn_is_training, \
            'BN_deconv2', reuse_vars=reuse)
        deconv3_upsamp = tf.image.resize_images(deconv2_norm, [16, 16])
        # concatenate grey-level gradients as extra edge-conditioning channels
        grey_deconv3_dv, grey_deconv3_dh = self.__get_gradients(inp_greylevel2d, \
            shape=[16, 16])
        deconv3_upsamp_edge = tf.concat([deconv3_upsamp, grey_deconv3_dv, grey_deconv3_dh], 3)
        deconv3 = tf.nn.relu(lf.conv2d(deconv3_upsamp_edge, W_deconv3, stride=1) + b_deconv3)
        deconv3_norm = lf.batch_norm_aiuiuc_wrapper(deconv3, bn_is_training, \
            'BN_deconv3', reuse_vars=reuse)
        deconv4_upsamp = tf.image.resize_images(deconv3_norm, [32, 32])
        grey_deconv4_dv, grey_deconv4_dh = self.__get_gradients(inp_greylevel2d, \
            shape=[32, 32])
        deconv4_upsamp_edge = tf.concat([deconv4_upsamp, grey_deconv4_dv, grey_deconv4_dh], 3)
        deconv4 = tf.nn.relu(lf.conv2d(deconv4_upsamp_edge, W_deconv4, stride=1) + b_deconv4)
        deconv4_norm = lf.batch_norm_aiuiuc_wrapper(deconv4, bn_is_training, \
            'BN_deconv4', reuse_vars=reuse)
        deconv5_upsamp = tf.image.resize_images(deconv4_norm, [64, 64])
        deconv5 = lf.conv2d(deconv5_upsamp, W_deconv5, stride=1) + b_deconv5
        deconv5_norm = lf.batch_norm_aiuiuc_wrapper(deconv5, bn_is_training, \
            'BN_deconv5', reuse_vars=reuse)
        # tanh maps the output into [-1, 1]; flatten back to [batch, H*W*nch]
        decoded_ch = tf.reshape(tf.tanh(deconv5_norm), \
            [self.flags.batch_size, self.flags.img_height*self.flags.img_width*self.nch])
        return decoded_ch

    def __get_gradients(self, in_tensor2d, shape=None):
        """Return zero-padded vertical and horizontal finite differences of
        *in_tensor2d*, optionally resized to *shape* first."""
        if(shape is not None):
            in_tensor = tf.image.resize_images(in_tensor2d, [shape[0], shape[1]])
        else:
            in_tensor = in_tensor2d
        [n,w,h,c] = in_tensor.get_shape().as_list()
        dvert = in_tensor[:,1:,:h,:] - in_tensor[:,:w-1,:h,:]
        # pad with a zero row/column so the output keeps the input shape
        dvert_padded = tf.concat([tf.constant(0., shape=[n, 1, h, c]), dvert], 1)
        dhorz = in_tensor[:,:w,1:,:] - in_tensor[:,:w,:h-1,:]
        dhorz_padded = tf.concat([tf.constant(0., shape=[n, w, 1, c]), dhorz], 2)
        return dvert_padded, dhorz_padded
|
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.serialization import jsonutils
from keystone import config
from keystone import exception
from keystone.policy.backends import rules
from keystone import tests
from keystone.tests.ksfixtures import temporaryfile
from keystone.tests import test_v3
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
class IdentityTestProtectedCase(test_v3.RestfulTestCase):
    """Test policy enforcement on the v3 Identity API."""

    def setUp(self):
        """Setup for Identity Protection Test Cases.

        As well as the usual housekeeping, create a set of domains,
        users, roles and projects for the subsequent tests:

        - Three domains: A,B & C. C is disabled.
        - DomainA has user1, DomainB has user2 and user3
        - DomainA has group1 and group2, DomainB has group3
        - User1 has two roles on DomainA
        - User2 has one role on DomainA

        Remember that there will also be a fourth domain in existence,
        the default domain.
        """
        # Ensure that test_v3.RestfulTestCase doesn't load its own
        # sample data, which would make checking the results of our
        # tests harder
        super(IdentityTestProtectedCase, self).setUp()

        # Initialize the policy engine and allow us to write to a temp
        # file in each test to create the policies
        self.addCleanup(rules.reset)
        rules.reset()
        self.tempfile = self.useFixture(temporaryfile.SecureTempFile())
        self.tmpfilename = self.tempfile.file_name
        self.config_fixture.config(policy_file=self.tmpfilename)

        # A default auth request we can use - un-scoped user token
        self.auth = self.build_authentication_request(
            user_id=self.user1['id'],
            password=self.user1['password'])

    def load_sample_data(self):
        # Overrides the base class hook: build the fixed domain/user/group/
        # role topology described in the setUp docstring.
        self._populate_default_domain()
        # Start by creating a couple of domains
        self.domainA = self.new_domain_ref()
        self.assignment_api.create_domain(self.domainA['id'], self.domainA)
        self.domainB = self.new_domain_ref()
        self.assignment_api.create_domain(self.domainB['id'], self.domainB)
        self.domainC = self.new_domain_ref()
        self.domainC['enabled'] = False
        self.assignment_api.create_domain(self.domainC['id'], self.domainC)

        # Now create some users, one in domainA and two of them in domainB.
        # create_user hashes the password, so stash the clear-text value and
        # restore it on the returned ref for use in authentication requests.
        self.user1 = self.new_user_ref(domain_id=self.domainA['id'])
        password = uuid.uuid4().hex
        self.user1['password'] = password
        self.user1 = self.identity_api.create_user(self.user1)
        self.user1['password'] = password

        self.user2 = self.new_user_ref(domain_id=self.domainB['id'])
        password = uuid.uuid4().hex
        self.user2['password'] = password
        self.user2 = self.identity_api.create_user(self.user2)
        self.user2['password'] = password

        self.user3 = self.new_user_ref(domain_id=self.domainB['id'])
        password = uuid.uuid4().hex
        self.user3['password'] = password
        self.user3 = self.identity_api.create_user(self.user3)
        self.user3['password'] = password

        self.group1 = self.new_group_ref(domain_id=self.domainA['id'])
        self.group1 = self.identity_api.create_group(self.group1)

        self.group2 = self.new_group_ref(domain_id=self.domainA['id'])
        self.group2 = self.identity_api.create_group(self.group2)

        self.group3 = self.new_group_ref(domain_id=self.domainB['id'])
        self.group3 = self.identity_api.create_group(self.group3)

        self.role = self.new_role_ref()
        self.role_api.create_role(self.role['id'], self.role)
        self.role1 = self.new_role_ref()
        self.role_api.create_role(self.role1['id'], self.role1)

        # user1: two roles on DomainA; user2: one role on DomainA
        self.assignment_api.create_grant(self.role['id'],
                                         user_id=self.user1['id'],
                                         domain_id=self.domainA['id'])
        self.assignment_api.create_grant(self.role['id'],
                                         user_id=self.user2['id'],
                                         domain_id=self.domainA['id'])
        self.assignment_api.create_grant(self.role1['id'],
                                         user_id=self.user1['id'],
                                         domain_id=self.domainA['id'])

    def _get_id_list_from_ref_list(self, ref_list):
        # Extract just the 'id' values from a list of entity references.
        result_list = []
        for x in ref_list:
            result_list.append(x['id'])
        return result_list

    def _set_policy(self, new_policy):
        # Write the given policy dict to the temp policy file so the policy
        # engine picks it up for the next request.
        with open(self.tmpfilename, "w") as policyfile:
            policyfile.write(jsonutils.dumps(new_policy))

    def test_list_users_unprotected(self):
        """GET /users (unprotected)

        Test Plan:

        - Update policy so api is unprotected
        - Use an un-scoped token to make sure we can get back all
          the users independent of domain

        """
        self._set_policy({"identity:list_users": []})
        r = self.get('/users', auth=self.auth)
        id_list = self._get_id_list_from_ref_list(r.result.get('users'))
        self.assertIn(self.user1['id'], id_list)
        self.assertIn(self.user2['id'], id_list)
        self.assertIn(self.user3['id'], id_list)

    def test_list_users_filtered_by_domain(self):
        """GET /users?domain_id=mydomain (filtered)

        Test Plan:

        - Update policy so api is unprotected
        - Use an un-scoped token to make sure we can filter the
          users by domainB, getting back the 2 users in that domain

        """
        self._set_policy({"identity:list_users": []})
        url_by_name = '/users?domain_id=%s' % self.domainB['id']
        r = self.get(url_by_name, auth=self.auth)
        # We should get back two users, those in DomainB
        id_list = self._get_id_list_from_ref_list(r.result.get('users'))
        self.assertIn(self.user2['id'], id_list)
        self.assertIn(self.user3['id'], id_list)

    def test_get_user_protected_match_id(self):
        """GET /users/{id} (match payload)

        Test Plan:

        - Update policy to protect api by user_id
        - List users with user_id of user1 as filter, to check that
          this will correctly match user_id in the flattened
          payload

        """
        # TODO(henry-nash, ayoung): It would be good to expand this
        # test for further test flattening, e.g. protect on, say, an
        # attribute of an object being created
        new_policy = {"identity:get_user": [["user_id:%(user_id)s"]]}
        self._set_policy(new_policy)
        url_by_name = '/users/%s' % self.user1['id']
        r = self.get(url_by_name, auth=self.auth)
        self.assertEqual(self.user1['id'], r.result['user']['id'])

    def test_get_user_protected_match_target(self):
        """GET /users/{id} (match target)

        Test Plan:

        - Update policy to protect api by domain_id
        - Try and read a user who is in DomainB with a token scoped
          to Domain A - this should fail
        - Retry this for a user who is in Domain A, which should succeed.
        - Finally, try getting a user that does not exist, which should
          still return UserNotFound

        """
        new_policy = {'identity:get_user':
                      [["domain_id:%(target.user.domain_id)s"]]}
        self._set_policy(new_policy)
        self.auth = self.build_authentication_request(
            user_id=self.user1['id'],
            password=self.user1['password'],
            domain_id=self.domainA['id'])
        url_by_name = '/users/%s' % self.user2['id']
        r = self.get(url_by_name, auth=self.auth,
                     expected_status=exception.ForbiddenAction.code)

        url_by_name = '/users/%s' % self.user1['id']
        r = self.get(url_by_name, auth=self.auth)
        self.assertEqual(self.user1['id'], r.result['user']['id'])

        # Unknown user: entity lookup (404) happens before policy (403)
        url_by_name = '/users/%s' % uuid.uuid4().hex
        r = self.get(url_by_name, auth=self.auth,
                     expected_status=exception.UserNotFound.code)

    def test_revoke_grant_protected_match_target(self):
        """DELETE /domains/{id}/users/{id}/roles/{id} (match target)

        Test Plan:

        - Update policy to protect api by domain_id of entities in
          the grant
        - Try and delete the existing grant that has a user who is
          from a different domain - this should fail.
        - Retry this for a user who is in Domain A, which should succeed.

        """
        new_policy = {'identity:revoke_grant':
                      [["domain_id:%(target.user.domain_id)s"]]}
        self._set_policy(new_policy)
        collection_url = (
            '/domains/%(domain_id)s/users/%(user_id)s/roles' % {
                'domain_id': self.domainA['id'],
                'user_id': self.user2['id']})
        member_url = '%(collection_url)s/%(role_id)s' % {
            'collection_url': collection_url,
            'role_id': self.role['id']}

        self.auth = self.build_authentication_request(
            user_id=self.user1['id'],
            password=self.user1['password'],
            domain_id=self.domainA['id'])
        self.delete(member_url, auth=self.auth,
                    expected_status=exception.ForbiddenAction.code)

        collection_url = (
            '/domains/%(domain_id)s/users/%(user_id)s/roles' % {
                'domain_id': self.domainA['id'],
                'user_id': self.user1['id']})
        member_url = '%(collection_url)s/%(role_id)s' % {
            'collection_url': collection_url,
            'role_id': self.role1['id']}
        self.delete(member_url, auth=self.auth)

    def test_list_users_protected_by_domain(self):
        """GET /users?domain_id=mydomain (protected)

        Test Plan:

        - Update policy to protect api by domain_id
        - List groups using a token scoped to domainA with a filter
          specifying domainA - we should only get back the one user
          that is in domainA.
        - Try and read the users from domainB - this should fail since
          we don't have a token scoped for domainB

        """
        new_policy = {"identity:list_users": ["domain_id:%(domain_id)s"]}
        self._set_policy(new_policy)
        self.auth = self.build_authentication_request(
            user_id=self.user1['id'],
            password=self.user1['password'],
            domain_id=self.domainA['id'])
        url_by_name = '/users?domain_id=%s' % self.domainA['id']
        r = self.get(url_by_name, auth=self.auth)
        # We should only get back one user, the one in DomainA
        id_list = self._get_id_list_from_ref_list(r.result.get('users'))
        self.assertEqual(1, len(id_list))
        self.assertIn(self.user1['id'], id_list)

        # Now try for domainB, which should fail
        url_by_name = '/users?domain_id=%s' % self.domainB['id']
        r = self.get(url_by_name, auth=self.auth,
                     expected_status=exception.ForbiddenAction.code)

    def test_list_groups_protected_by_domain(self):
        """GET /groups?domain_id=mydomain (protected)

        Test Plan:

        - Update policy to protect api by domain_id
        - List groups using a token scoped to domainA and make sure
          we only get back the two groups that are in domainA
        - Try and read the groups from domainB - this should fail since
          we don't have a token scoped for domainB

        """
        new_policy = {"identity:list_groups": ["domain_id:%(domain_id)s"]}
        self._set_policy(new_policy)
        self.auth = self.build_authentication_request(
            user_id=self.user1['id'],
            password=self.user1['password'],
            domain_id=self.domainA['id'])
        url_by_name = '/groups?domain_id=%s' % self.domainA['id']
        r = self.get(url_by_name, auth=self.auth)
        # We should only get back two groups, the ones in DomainA
        id_list = self._get_id_list_from_ref_list(r.result.get('groups'))
        self.assertEqual(2, len(id_list))
        self.assertIn(self.group1['id'], id_list)
        self.assertIn(self.group2['id'], id_list)

        # Now try for domainB, which should fail
        url_by_name = '/groups?domain_id=%s' % self.domainB['id']
        r = self.get(url_by_name, auth=self.auth,
                     expected_status=exception.ForbiddenAction.code)

    def test_list_groups_protected_by_domain_and_filtered(self):
        """GET /groups?domain_id=mydomain&name=myname (protected)

        Test Plan:

        - Update policy to protect api by domain_id
        - List groups using a token scoped to domainA with a filter
          specifying both domainA and the name of group.
        - We should only get back the group in domainA that matches
          the name

        """
        new_policy = {"identity:list_groups": ["domain_id:%(domain_id)s"]}
        self._set_policy(new_policy)
        self.auth = self.build_authentication_request(
            user_id=self.user1['id'],
            password=self.user1['password'],
            domain_id=self.domainA['id'])
        url_by_name = '/groups?domain_id=%s&name=%s' % (
            self.domainA['id'], self.group2['name'])
        r = self.get(url_by_name, auth=self.auth)
        # We should only get back one user, the one in DomainA that matches
        # the name supplied
        id_list = self._get_id_list_from_ref_list(r.result.get('groups'))
        self.assertEqual(1, len(id_list))
        self.assertIn(self.group2['id'], id_list)
class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase):
"""Test policy enforcement of the sample v3 cloud policy file."""
def setUp(self):
"""Setup for v3 Cloud Policy Sample Test Cases.
The following data is created:
- Three domains: domainA, domainB and admin_domain
- One project, which name is 'project'
- domainA has three users: domain_admin_user, project_admin_user and
just_a_user:
- domain_admin_user has role 'admin' on domainA,
- project_admin_user has role 'admin' on the project,
- just_a_user has a non-admin role on both domainA and the project.
- admin_domain has user cloud_admin_user, with an 'admin' role
on admin_domain.
We test various api protection rules from the cloud sample policy
file to make sure the sample is valid and that we correctly enforce it.
"""
# Ensure that test_v3.RestfulTestCase doesn't load its own
# sample data, which would make checking the results of our
# tests harder
super(IdentityTestv3CloudPolicySample, self).setUp()
# Finally, switch to the v3 sample policy file
self.addCleanup(rules.reset)
rules.reset()
self.config_fixture.config(
policy_file=tests.dirs.etc('policy.v3cloudsample.json'))
def load_sample_data(self):
# Start by creating a couple of domains
self._populate_default_domain()
self.domainA = self.new_domain_ref()
self.assignment_api.create_domain(self.domainA['id'], self.domainA)
self.domainB = self.new_domain_ref()
self.assignment_api.create_domain(self.domainB['id'], self.domainB)
self.admin_domain = {'id': 'admin_domain_id', 'name': 'Admin_domain'}
self.assignment_api.create_domain(self.admin_domain['id'],
self.admin_domain)
# And our users
self.cloud_admin_user = self.new_user_ref(
domain_id=self.admin_domain['id'])
password = uuid.uuid4().hex
self.cloud_admin_user['password'] = password
self.cloud_admin_user = (
self.identity_api.create_user(self.cloud_admin_user))
self.cloud_admin_user['password'] = password
self.just_a_user = self.new_user_ref(domain_id=self.domainA['id'])
password = uuid.uuid4().hex
self.just_a_user['password'] = password
self.just_a_user = self.identity_api.create_user(self.just_a_user)
self.just_a_user['password'] = password
self.domain_admin_user = self.new_user_ref(
domain_id=self.domainA['id'])
password = uuid.uuid4().hex
self.domain_admin_user['password'] = password
self.domain_admin_user = (
self.identity_api.create_user(self.domain_admin_user))
self.domain_admin_user['password'] = password
self.project_admin_user = self.new_user_ref(
domain_id=self.domainA['id'])
password = uuid.uuid4().hex
self.project_admin_user['password'] = password
self.project_admin_user = (
self.identity_api.create_user(self.project_admin_user))
self.project_admin_user['password'] = password
# The admin role and another plain role
self.admin_role = {'id': uuid.uuid4().hex, 'name': 'admin'}
self.role_api.create_role(self.admin_role['id'], self.admin_role)
self.role = self.new_role_ref()
self.role_api.create_role(self.role['id'], self.role)
# The cloud admin just gets the admin role
self.assignment_api.create_grant(self.admin_role['id'],
user_id=self.cloud_admin_user['id'],
domain_id=self.admin_domain['id'])
# Assign roles to the domain
self.assignment_api.create_grant(self.admin_role['id'],
user_id=self.domain_admin_user['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role['id'],
user_id=self.just_a_user['id'],
domain_id=self.domainA['id'])
# Create and assign roles to the project
self.project = self.new_project_ref(domain_id=self.domainA['id'])
self.assignment_api.create_project(self.project['id'], self.project)
self.assignment_api.create_grant(self.admin_role['id'],
user_id=self.project_admin_user['id'],
project_id=self.project['id'])
self.assignment_api.create_grant(self.role['id'],
user_id=self.just_a_user['id'],
project_id=self.project['id'])
def _stati(self, expected_status):
# Return the expected return codes for APIs with and without data
# with any specified status overriding the normal values
if expected_status is None:
return (200, 201, 204)
else:
return (expected_status, expected_status, expected_status)
def _test_user_management(self, domain_id, expected=None):
    """Exercise the user CRUD APIs as the currently-authed user.

    ``expected`` overrides the success status codes, letting the same
    sequence assert either success or rejection.
    """
    ok, created, no_data = self._stati(expected)
    user_url = '/users/%s' % self.just_a_user['id']
    # Read: the single entity and the per-domain listing.
    self.get(user_url, auth=self.auth, expected_status=ok)
    self.get('/users?domain_id=%s' % domain_id, auth=self.auth,
             expected_status=ok)
    # Update, then delete the user.
    self.patch(user_url, auth=self.auth,
               body={'user': {'description': 'Updated'}},
               expected_status=ok)
    self.delete(user_url, auth=self.auth, expected_status=no_data)
    # Finally create a brand-new user in the target domain.
    new_user = self.new_user_ref(domain_id=domain_id)
    self.post('/users', auth=self.auth, body={'user': new_user},
              expected_status=created)
def _test_project_management(self, domain_id, expected=None):
    """Exercise the project CRUD APIs as the currently-authed user.

    ``expected`` overrides the success status codes, letting the same
    sequence assert either success or rejection.
    """
    ok, created, no_data = self._stati(expected)
    project_url = '/projects/%s' % self.project['id']
    # Read: the single entity and the per-domain listing.
    self.get(project_url, auth=self.auth, expected_status=ok)
    self.get('/projects?domain_id=%s' % domain_id, auth=self.auth,
             expected_status=ok)
    # Update, then delete the project.
    self.patch(project_url, auth=self.auth,
               body={'project': {'description': 'Updated'}},
               expected_status=ok)
    self.delete(project_url, auth=self.auth, expected_status=no_data)
    # Finally create a brand-new project in the target domain.
    new_project = self.new_project_ref(domain_id=domain_id)
    self.post('/projects', auth=self.auth, body={'project': new_project},
              expected_status=created)
def _test_domain_management(self, expected=None):
    """Exercise the domain CRUD APIs as the currently-authed user."""
    ok, created, no_data = self._stati(expected)
    target = '/domains/%s' % self.domainB['id']
    # Read: the single entity and the full listing.
    self.get(target, auth=self.auth, expected_status=ok)
    self.get('/domains', auth=self.auth, expected_status=ok)
    # Update (disabling as well, since an enabled domain can't be
    # deleted), then delete the domain.
    update = {'description': 'Updated', 'enabled': False}
    self.patch(target, auth=self.auth, body={'domain': update},
               expected_status=ok)
    self.delete(target, auth=self.auth, expected_status=no_data)
    # Finally create a brand-new domain.
    self.post('/domains', auth=self.auth,
              body={'domain': self.new_domain_ref()},
              expected_status=created)
def _test_grants(self, target, entity_id, expected=None):
    """Exercise role-grant APIs on a domain or project target."""
    ok, _created, no_data = self._stati(expected)
    # A throw-away role so the grant operations are independent of the
    # fixture roles created in setUp.
    grant_role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
    self.role_api.create_role(grant_role['id'], grant_role)
    collection_url = ('/%s/%s/users/%s/roles'
                      % (target, entity_id, self.just_a_user['id']))
    member_url = '%s/%s' % (collection_url, grant_role['id'])
    # Create, check, list and then delete the grant.
    self.put(member_url, auth=self.auth, expected_status=no_data)
    self.head(member_url, auth=self.auth, expected_status=no_data)
    self.get(collection_url, auth=self.auth, expected_status=ok)
    self.delete(member_url, auth=self.auth, expected_status=no_data)
def test_user_management(self):
    """User CRUD is forbidden without, and allowed with, the domain
    admin role."""
    domain = self.domainA['id']

    def _login(user):
        # Scope the token to domainA so its policy rules apply.
        self.auth = self.build_authentication_request(
            user_id=user['id'], password=user['password'],
            domain_id=domain)

    # A plain user: every operation must be rejected.
    _login(self.just_a_user)
    self._test_user_management(
        domain, expected=exception.ForbiddenAction.code)
    # The domain admin: everything must succeed.
    _login(self.domain_admin_user)
    self._test_user_management(domain)
def test_user_management_by_cloud_admin(self):
    """A cloud admin can manage users in any domain."""
    admin = self.cloud_admin_user
    self.auth = self.build_authentication_request(
        user_id=admin['id'], password=admin['password'],
        domain_id=self.admin_domain['id'])
    # domainA is not the admin domain, yet this must still succeed.
    self._test_user_management(self.domainA['id'])
def test_project_management(self):
    """Project CRUD needs the domain admin role; listing one's own
    projects stays open to any user."""
    domain = self.domainA['id']
    # A plain user: CRUD operations must be rejected...
    self.auth = self.build_authentication_request(
        user_id=self.just_a_user['id'],
        password=self.just_a_user['password'],
        domain_id=domain)
    self._test_project_management(
        domain, expected=exception.ForbiddenAction.code)
    # ...but that user may still list the projects they belong to.
    self.get('/users/%s/projects' % self.just_a_user['id'],
             auth=self.auth)
    # The domain admin: everything must succeed.
    self.auth = self.build_authentication_request(
        user_id=self.domain_admin_user['id'],
        password=self.domain_admin_user['password'],
        domain_id=domain)
    self._test_project_management(domain)
def test_project_management_by_cloud_admin(self):
    """A cloud admin can manage projects outside the admin domain."""
    admin = self.cloud_admin_user
    self.auth = self.build_authentication_request(
        user_id=admin['id'], password=admin['password'],
        domain_id=self.admin_domain['id'])
    # Operating on domainA, which is not the cloud admin's own domain.
    self._test_project_management(self.domainA['id'])
def test_domain_grants(self):
    """Domain grants need the admin role, scoped to the same domain."""
    domain = self.domainA['id']
    # A plain user must be rejected.
    self.auth = self.build_authentication_request(
        user_id=self.just_a_user['id'],
        password=self.just_a_user['password'],
        domain_id=domain)
    self._test_grants('domains', domain,
                      expected=exception.ForbiddenAction.code)
    # The domain admin may manage grants on their own domain...
    self.auth = self.build_authentication_request(
        user_id=self.domain_admin_user['id'],
        password=self.domain_admin_user['password'],
        domain_id=domain)
    self._test_grants('domains', domain)
    # ...but not on a different domain.
    self._test_grants('domains', self.domainB['id'],
                      expected=exception.ForbiddenAction.code)
def test_domain_grants_by_cloud_admin(self):
    """A cloud admin can manage role grants on any domain."""
    admin = self.cloud_admin_user
    self.auth = self.build_authentication_request(
        user_id=admin['id'], password=admin['password'],
        domain_id=self.admin_domain['id'])
    self._test_grants('domains', self.domainA['id'])
def test_project_grants(self):
    """Project grants need the project admin role."""
    project = self.project['id']

    def _login(user):
        # Project-scoped token, unlike the domain-scoped tests above.
        self.auth = self.build_authentication_request(
            user_id=user['id'], password=user['password'],
            project_id=project)

    # A plain user must be rejected.
    _login(self.just_a_user)
    self._test_grants('projects', project,
                      expected=exception.ForbiddenAction.code)
    # The project admin must succeed.
    _login(self.project_admin_user)
    self._test_grants('projects', project)
def test_project_grants_by_domain_admin(self):
    """A domain admin can manage grants on projects in their domain."""
    admin = self.domain_admin_user
    self.auth = self.build_authentication_request(
        user_id=admin['id'], password=admin['password'],
        domain_id=self.domainA['id'])
    self._test_grants('projects', self.project['id'])
def test_cloud_admin(self):
    """Only the cloud admin — not a domain admin — may manage
    domains."""
    # The domain admin must be rejected.
    self.auth = self.build_authentication_request(
        user_id=self.domain_admin_user['id'],
        password=self.domain_admin_user['password'],
        domain_id=self.domainA['id'])
    self._test_domain_management(
        expected=exception.ForbiddenAction.code)
    # The cloud admin must succeed.
    admin = self.cloud_admin_user
    self.auth = self.build_authentication_request(
        user_id=admin['id'], password=admin['password'],
        domain_id=self.admin_domain['id'])
    self._test_domain_management()
def test_list_user_credentials(self):
    """Users may list only their own credentials."""
    # One credential per user so both filters have something to find.
    self.credential_user = self.new_credential_ref(self.just_a_user['id'])
    self.credential_api.create_credential(self.credential_user['id'],
                                          self.credential_user)
    self.credential_admin = self.new_credential_ref(
        self.cloud_admin_user['id'])
    self.credential_api.create_credential(self.credential_admin['id'],
                                          self.credential_admin)
    self.auth = self.build_authentication_request(
        user_id=self.just_a_user['id'],
        password=self.just_a_user['password'])
    # Own credentials: allowed.
    self.get('/credentials?user_id=%s' % self.just_a_user['id'],
             auth=self.auth)
    # Someone else's credentials, or the unfiltered list: forbidden.
    forbidden = exception.ForbiddenAction.code
    self.get('/credentials?user_id=%s' % self.cloud_admin_user['id'],
             auth=self.auth, expected_status=forbidden)
    self.get('/credentials', auth=self.auth, expected_status=forbidden)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import dhcp
from neutron.common import config as base_config
from neutron.common import constants
from neutron.openstack.common import log as logging
from neutron.tests import base
LOG = logging.getLogger(__name__)
class FakeIPAllocation:
    """Stand-in for a port's fixed-IP allocation record."""
    def __init__(self, address, subnet_id=None):
        # Mirror the attribute names the DHCP driver reads.
        self.ip_address, self.subnet_id = address, subnet_id
class DhcpOpt(object):
    """Attribute bag for an extra DHCP option (opt_name / opt_value)."""
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __str__(self):
        return str(self.__dict__)
class FakePort1:
    """Active port with a single IPv4 allocation on FakeV4Subnet."""
    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
    admin_state_up = True
    device_owner = 'foo1'
    fixed_ips = [FakeIPAllocation('192.168.0.2')]
    mac_address = '00:00:80:aa:bb:cc'
    def __init__(self):
        # Instance-level so tests can attach options per instance.
        self.extra_dhcp_opts = []
class FakePort2:
    """Administratively-down port with a single IPv6 allocation."""
    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
    admin_state_up = False
    device_owner = 'foo2'
    fixed_ips = [FakeIPAllocation('fdca:3ba5:a17a:4ba3::2')]
    mac_address = '00:00:f3:aa:bb:cc'
    def __init__(self):
        # Instance-level so tests can attach options per instance.
        self.extra_dhcp_opts = []
class FakePort3:
    """Active dual-stack port (one IPv4 and one IPv6 allocation)."""
    id = '44444444-4444-4444-4444-444444444444'
    admin_state_up = True
    device_owner = 'foo3'
    fixed_ips = [FakeIPAllocation('192.168.0.3'),
                 FakeIPAllocation('fdca:3ba5:a17a:4ba3::3')]
    mac_address = '00:00:0f:aa:bb:cc'
    def __init__(self):
        # Instance-level so tests can attach options per instance.
        self.extra_dhcp_opts = []
class FakeRouterPort:
    """Router-interface port sitting on the FakeV4Subnet gateway IP."""
    id = 'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr'
    admin_state_up = True
    device_owner = constants.DEVICE_OWNER_ROUTER_INTF
    fixed_ips = [FakeIPAllocation('192.168.0.1',
                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
    # NOTE(review): 'rr' is not valid hex; appears to be a deliberate
    # opaque fixture value — confirm nothing parses this MAC.
    mac_address = '00:00:0f:rr:rr:rr'
    def __init__(self):
        # Instance-level so tests can attach options per instance.
        self.extra_dhcp_opts = []
class FakePortMultipleAgents1:
    """First DHCP-agent port used by the multiple-agents scenarios."""
    id = 'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr'
    admin_state_up = True
    device_owner = constants.DEVICE_OWNER_DHCP
    fixed_ips = [FakeIPAllocation('192.168.0.5',
                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
    mac_address = '00:00:0f:dd:dd:dd'
    def __init__(self):
        # Instance-level so tests can attach options per instance.
        self.extra_dhcp_opts = []
class FakePortMultipleAgents2:
    """Second DHCP-agent port used by the multiple-agents scenarios."""
    id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
    admin_state_up = True
    device_owner = constants.DEVICE_OWNER_DHCP
    fixed_ips = [FakeIPAllocation('192.168.0.6',
                                  'dddddddd-dddd-dddd-dddd-dddddddddddd')]
    mac_address = '00:00:0f:ee:ee:ee'
    def __init__(self):
        # Instance-level so tests can attach options per instance.
        self.extra_dhcp_opts = []
class FakeV4HostRoute:
    """IPv4 subnet host route (destination CIDR and next hop)."""
    destination = '20.0.0.1/24'
    nexthop = '20.0.0.1'
class FakeV4HostRouteGateway:
    """Host route whose destination is the default route (0.0.0.0/0)."""
    destination = '0.0.0.0/0'
    nexthop = '10.0.0.1'
class FakeV6HostRoute:
    """IPv6 subnet host route fixture."""
    # NOTE(review): 'g' is not a valid hex digit, so these are not real
    # IPv6 values — presumably intentional opaque fixtures; confirm.
    destination = 'gdca:3ba5:a17a:4ba3::/64'
    nexthop = 'gdca:3ba5:a17a:4ba3::1'
class FakeV4Subnet:
    """DHCP-enabled IPv4 subnet with one host route and one DNS server."""
    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
    ip_version = 4
    cidr = '192.168.0.0/24'
    gateway_ip = '192.168.0.1'
    enable_dhcp = True
    host_routes = [FakeV4HostRoute]
    dns_nameservers = ['8.8.8.8']
class FakeV4SubnetGatewayRoute:
    """Like FakeV4Subnet but its host route is a default (0/0) route."""
    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
    ip_version = 4
    cidr = '192.168.0.0/24'
    gateway_ip = '192.168.0.1'
    enable_dhcp = True
    host_routes = [FakeV4HostRouteGateway]
    dns_nameservers = ['8.8.8.8']
class FakeV4SubnetMultipleAgentsWithoutDnsProvided:
    """IPv4 subnet with no DNS servers, for the multi-agent scenario."""
    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
    ip_version = 4
    cidr = '192.168.0.0/24'
    gateway_ip = '192.168.0.1'
    enable_dhcp = True
    dns_nameservers = []
    host_routes = []
class FakeV4MultipleAgentsWithoutDnsProvided:
    """Network with two DHCP-agent ports and no subnet DNS servers."""
    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
    subnets = [FakeV4SubnetMultipleAgentsWithoutDnsProvided()]
    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(),
             FakePortMultipleAgents1(), FakePortMultipleAgents2()]
    namespace = 'qdhcp-ns'
class FakeV4SubnetMultipleAgentsWithDnsProvided:
    """IPv4 subnet with an explicit DNS server, multi-agent scenario."""
    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
    ip_version = 4
    cidr = '192.168.0.0/24'
    gateway_ip = '192.168.0.1'
    enable_dhcp = True
    dns_nameservers = ['8.8.8.8']
    host_routes = []
class FakeV4MultipleAgentsWithDnsProvided:
    """Network with two DHCP-agent ports and subnet DNS configured."""
    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
    subnets = [FakeV4SubnetMultipleAgentsWithDnsProvided()]
    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(),
             FakePortMultipleAgents1(), FakePortMultipleAgents2()]
    namespace = 'qdhcp-ns'
class FakeV6Subnet:
    """DHCP-enabled IPv6 subnet fixture."""
    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
    ip_version = 6
    cidr = 'fdca:3ba5:a17a:4ba3::/64'
    gateway_ip = 'fdca:3ba5:a17a:4ba3::1'
    enable_dhcp = True
    host_routes = [FakeV6HostRoute]
    # NOTE(review): 'g' is not valid hex — opaque fixture value.
    dns_nameservers = ['gdca:3ba5:a17a:4ba3::1']
class FakeV4SubnetNoDHCP:
    """IPv4 subnet with DHCP disabled."""
    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
    ip_version = 4
    cidr = '192.168.1.0/24'
    gateway_ip = '192.168.1.1'
    enable_dhcp = False
    host_routes = []
    dns_nameservers = []
class FakeV4SubnetNoGateway:
    """DHCP-enabled IPv4 subnet without a gateway address."""
    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
    ip_version = 4
    cidr = '192.168.1.0/24'
    gateway_ip = None
    enable_dhcp = True
    host_routes = []
    dns_nameservers = []
class FakeV4SubnetNoRouter:
    """DHCP-enabled IPv4 subnet used by the no-router network fixture."""
    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
    ip_version = 4
    cidr = '192.168.1.0/24'
    gateway_ip = '192.168.1.1'
    enable_dhcp = True
    host_routes = []
    dns_nameservers = []
class FakeV4Network:
    """Single-subnet IPv4 network with one port."""
    id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
    subnets = [FakeV4Subnet()]
    ports = [FakePort1()]
    namespace = 'qdhcp-ns'
class FakeV6Network:
    """Single-subnet IPv6 network with one port."""
    id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
    subnets = [FakeV6Subnet()]
    ports = [FakePort2()]
    namespace = 'qdhcp-ns'
class FakeDualNetwork:
    """Dual-stack network: one IPv4 and one IPv6 subnet, four ports."""
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4Subnet(), FakeV6Subnet()]
    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
    namespace = 'qdhcp-ns'
class FakeDualNetworkGatewayRoute:
    """Dual-stack network whose IPv4 subnet carries a default route."""
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4SubnetGatewayRoute(), FakeV6Subnet()]
    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
    namespace = 'qdhcp-ns'
class FakeDualNetworkSingleDHCP:
    """Two IPv4 subnets of which only the first has DHCP enabled."""
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4Subnet(), FakeV4SubnetNoDHCP()]
    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
    namespace = 'qdhcp-ns'
class FakeV4NoGatewayNetwork:
    """IPv4 network whose subnet has no gateway address."""
    # NOTE(review): unlike the other fixtures, no namespace attribute.
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4SubnetNoGateway()]
    ports = [FakePort1()]
class FakeV4NetworkNoRouter:
    """IPv4 network without any router port."""
    # NOTE(review): unlike the other fixtures, no namespace attribute.
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4SubnetNoRouter()]
    ports = [FakePort1()]
class FakeDualV4Pxe3Ports:
    """Two-subnet network whose three ports carry PXE extra DHCP options.

    ``port_detail`` selects whether every port shares the same PXE
    servers ("portsSame") or each port points at different ones.
    """
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4Subnet(), FakeV4SubnetNoDHCP()]
    # NOTE(review): ports is a class attribute, so the assignments in
    # __init__ mutate port objects shared by all instances of this class.
    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
    namespace = 'qdhcp-ns'
    def __init__(self, port_detail="portsSame"):
        if port_detail == "portsSame":
            self.ports[0].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
            self.ports[1].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')]
            self.ports[2].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')]
        else:
            self.ports[0].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
            self.ports[1].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')]
            self.ports[2].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.7'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')]
class FakeV4NetworkPxe2Ports:
    """Single-subnet network whose two ports carry PXE extra options.

    ``port_detail`` selects identical ("portsSame") or differing PXE
    servers across the ports.
    """
    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
    subnets = [FakeV4Subnet()]
    # NOTE(review): ports is a class attribute, so __init__ mutates port
    # objects shared by all instances of this class.
    ports = [FakePort1(), FakePort2(), FakeRouterPort()]
    namespace = 'qdhcp-ns'
    def __init__(self, port_detail="portsSame"):
        if port_detail == "portsSame":
            self.ports[0].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
            self.ports[1].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
        else:
            self.ports[0].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
            self.ports[1].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
class FakeV4NetworkPxe3Ports:
    """Single-subnet network whose three ports carry PXE extra options.

    ``port_detail`` selects identical ("portsSame") or differing PXE
    servers across the ports.
    """
    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
    subnets = [FakeV4Subnet()]
    # NOTE(review): ports is a class attribute, so __init__ mutates port
    # objects shared by all instances of this class.
    ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
    namespace = 'qdhcp-ns'
    def __init__(self, port_detail="portsSame"):
        if port_detail == "portsSame":
            self.ports[0].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
            self.ports[1].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
            self.ports[2].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
        else:
            self.ports[0].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
            self.ports[1].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')]
            self.ports[2].extra_dhcp_opts = [
                DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7'),
                DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.7'),
                DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')]
class LocalChild(dhcp.DhcpLocalProcess):
    """Minimal concrete DhcpLocalProcess that records which hook ran."""
    PORTS = {4: [4], 6: [6]}
    def __init__(self, *args, **kwargs):
        super(LocalChild, self).__init__(*args, **kwargs)
        # Audit trail of lifecycle calls, inspected by the tests.
        self.called = []
    def reload_allocations(self):
        self.called.append('reload')
    def restart(self):
        self.called.append('restart')
    def spawn_process(self):
        self.called.append('spawn')
class TestBase(base.BaseTestCase):
    """Common config and mocking scaffolding for the DHCP driver tests."""
    def setUp(self):
        super(TestBase, self).setUp()
        # Load the test neutron.conf shipped in the tree.
        root = os.path.dirname(os.path.dirname(__file__))
        args = ['--config-file',
                os.path.join(root, 'etc', 'neutron.conf.test')]
        self.conf = config.setup_conf()
        self.conf.register_opts(base_config.core_opts)
        self.conf.register_opts(dhcp.OPTS)
        config.register_interface_driver_opts_helper(self.conf)
        # DeviceManager would touch real interfaces; mock it out and
        # keep the mock so tests can assert on its calls.
        instance = mock.patch("neutron.agent.linux.dhcp.DeviceManager")
        self.mock_mgr = instance.start()
        self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata',
                                           default=True))
        self.conf(args=args)
        self.conf.set_override('state_path', '')
        self.conf.use_namespaces = True
        # Capture file writes and shell-outs performed by the driver.
        self.replace_p = mock.patch('neutron.agent.linux.utils.replace_file')
        self.execute_p = mock.patch('neutron.agent.linux.utils.execute')
        self.safe = self.replace_p.start()
        self.execute = self.execute_p.start()
class TestDhcpBase(TestBase):
    """Tests for the abstract DhcpBase contract."""
    def test_existing_dhcp_networks_abstract_error(self):
        self.assertRaises(NotImplementedError,
                          dhcp.DhcpBase.existing_dhcp_networks,
                          None, None)
    def test_check_version_abstract_error(self):
        self.assertRaises(NotImplementedError,
                          dhcp.DhcpBase.check_version)
    def test_base_abc_error(self):
        # DhcpBase is abstract, so direct instantiation must fail.
        self.assertRaises(TypeError, dhcp.DhcpBase, None)
    def test_restart(self):
        # restart() must disable (retaining the port) then re-enable.
        class SubClass(dhcp.DhcpBase):
            def __init__(self):
                dhcp.DhcpBase.__init__(self, cfg.CONF, FakeV4Network(), None)
                self.called = []
            def enable(self):
                self.called.append('enable')
            def disable(self, retain_port=False):
                self.called.append('disable %s' % retain_port)
            def reload_allocations(self):
                pass
            @property
            def active(self):
                return True
        c = SubClass()
        c.restart()
        self.assertEqual(c.called, ['disable True', 'enable'])
class TestDhcpLocalProcess(TestBase):
    """Lifecycle tests for DhcpLocalProcess, driven via LocalChild."""
    def test_active(self):
        # active is True when /proc/<pid>/cmdline contains the network id.
        with mock.patch('__builtin__.open') as mock_open:
            mock_open.return_value.__enter__ = lambda s: s
            mock_open.return_value.__exit__ = mock.Mock()
            mock_open.return_value.readline.return_value = \
                'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
            with mock.patch.object(LocalChild, 'pid') as pid:
                pid.__get__ = mock.Mock(return_value=4)
                lp = LocalChild(self.conf, FakeV4Network())
                self.assertTrue(lp.active)
            mock_open.assert_called_once_with('/proc/4/cmdline', 'r')
    def test_active_none(self):
        # No pid on record means the process cannot be active.
        dummy_cmd_line = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        self.execute.return_value = (dummy_cmd_line, '')
        with mock.patch.object(LocalChild, 'pid') as pid:
            pid.__get__ = mock.Mock(return_value=None)
            lp = LocalChild(self.conf, FakeV4Network())
            self.assertFalse(lp.active)
    def test_active_cmd_mismatch(self):
        # A cmdline for a different network id is not "active".
        with mock.patch('__builtin__.open') as mock_open:
            mock_open.return_value.__enter__ = lambda s: s
            mock_open.return_value.__exit__ = mock.Mock()
            mock_open.return_value.readline.return_value = \
                'bbbbbbbb-bbbb-bbbb-aaaa-aaaaaaaaaaaa'
            with mock.patch.object(LocalChild, 'pid') as pid:
                pid.__get__ = mock.Mock(return_value=4)
                lp = LocalChild(self.conf, FakeV4Network())
                self.assertFalse(lp.active)
            mock_open.assert_called_once_with('/proc/4/cmdline', 'r')
    def test_get_conf_file_name(self):
        tpl = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/dev'
        with mock.patch('os.path.isdir') as isdir:
            isdir.return_value = False
            with mock.patch('os.makedirs') as makedirs:
                lp = LocalChild(self.conf, FakeV4Network())
                self.assertEqual(lp.get_conf_file_name('dev'), tpl)
                # Without ensure_conf_dir the directory is not created.
                self.assertFalse(makedirs.called)
    def test_get_conf_file_name_ensure_dir(self):
        tpl = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/dev'
        with mock.patch('os.path.isdir') as isdir:
            isdir.return_value = False
            with mock.patch('os.makedirs') as makedirs:
                lp = LocalChild(self.conf, FakeV4Network())
                # ensure_conf_dir=True must create the missing directory.
                self.assertEqual(lp.get_conf_file_name('dev', True), tpl)
                self.assertTrue(makedirs.called)
    def test_enable_already_active(self):
        # Enabling a running process degrades to a restart.
        with mock.patch.object(LocalChild, 'active') as patched:
            patched.__get__ = mock.Mock(return_value=True)
            lp = LocalChild(self.conf, FakeV4Network())
            lp.enable()
            self.assertEqual(lp.called, ['restart'])
    def test_enable(self):
        attrs_to_mock = dict(
            [(a, mock.DEFAULT) for a in
             ['active', 'get_conf_file_name', 'interface_name']]
        )
        with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
            mocks['active'].__get__ = mock.Mock(return_value=False)
            mocks['get_conf_file_name'].return_value = '/dir'
            mocks['interface_name'].__set__ = mock.Mock()
            lp = LocalChild(self.conf,
                            FakeDualNetwork())
            lp.enable()
            # Enabling sets up the device then spawns the process.
            self.mock_mgr.assert_has_calls(
                [mock.call(self.conf, 'sudo', None),
                 mock.call().setup(mock.ANY, reuse_existing=True)])
            self.assertEqual(lp.called, ['spawn'])
            self.assertTrue(mocks['interface_name'].__set__.called)
    def test_disable_not_active(self):
        attrs_to_mock = dict([(a, mock.DEFAULT) for a in
                              ['active', 'interface_name', 'pid']])
        with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
            mocks['active'].__get__ = mock.Mock(return_value=False)
            mocks['pid'].__get__ = mock.Mock(return_value=5)
            mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
            with mock.patch.object(dhcp.LOG, 'debug') as log:
                lp = LocalChild(self.conf, FakeDualNetwork())
                lp.disable()
                # A pid without a live process is reported as stale.
                msg = log.call_args[0][0]
                self.assertIn('stale', msg)
    def test_disable_unknown_network(self):
        attrs_to_mock = dict([(a, mock.DEFAULT) for a in
                              ['active', 'interface_name', 'pid']])
        with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
            mocks['active'].__get__ = mock.Mock(return_value=False)
            mocks['pid'].__get__ = mock.Mock(return_value=None)
            mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
            with mock.patch.object(dhcp.LOG, 'debug') as log:
                lp = LocalChild(self.conf, FakeDualNetwork())
                lp.disable()
                msg = log.call_args[0][0]
                self.assertIn('No DHCP', msg)
    def test_disable_retain_port(self):
        attrs_to_mock = dict([(a, mock.DEFAULT) for a in
                              ['active', 'interface_name', 'pid']])
        network = FakeDualNetwork()
        with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
            mocks['active'].__get__ = mock.Mock(return_value=True)
            mocks['pid'].__get__ = mock.Mock(return_value=5)
            mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
            lp = LocalChild(self.conf, network)
            lp.disable(retain_port=True)
            # Only the process is killed; the port is left in place.
            exp_args = ['kill', '-9', 5]
            self.execute.assert_called_once_with(exp_args, 'sudo')
    def test_disable(self):
        attrs_to_mock = dict([(a, mock.DEFAULT) for a in
                              ['active', 'interface_name', 'pid']])
        network = FakeDualNetwork()
        with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
            mocks['active'].__get__ = mock.Mock(return_value=True)
            mocks['pid'].__get__ = mock.Mock(return_value=5)
            mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
            lp = LocalChild(self.conf, network)
            with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip:
                lp.disable()
            # Full disable kills the process and destroys the device,
            # but does not delete the namespace by default.
            self.mock_mgr.assert_has_calls([mock.call(self.conf, 'sudo', None),
                                            mock.call().destroy(network, 'tap0')])
            exp_args = ['kill', '-9', 5]
            self.execute.assert_called_once_with(exp_args, 'sudo')
            self.assertEqual(ip.return_value.netns.delete.call_count, 0)
    def test_disable_delete_ns(self):
        # With dhcp_delete_namespaces the namespace is removed as well.
        self.conf.set_override('dhcp_delete_namespaces', True)
        attrs_to_mock = dict([(a, mock.DEFAULT) for a in ['active', 'pid']])
        with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
            mocks['active'].__get__ = mock.Mock(return_value=False)
            mocks['pid'].__get__ = mock.Mock(return_value=False)
            lp = LocalChild(self.conf, FakeDualNetwork())
            with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip:
                lp.disable()
            ip.return_value.netns.delete.assert_called_with('qdhcp-ns')
    def test_pid(self):
        with mock.patch('__builtin__.open') as mock_open:
            mock_open.return_value.__enter__ = lambda s: s
            mock_open.return_value.__exit__ = mock.Mock()
            mock_open.return_value.read.return_value = '5'
            lp = LocalChild(self.conf, FakeDualNetwork())
            self.assertEqual(lp.pid, 5)
    def test_pid_no_an_int(self):
        # A non-numeric pid file yields None rather than an exception.
        with mock.patch('__builtin__.open') as mock_open:
            mock_open.return_value.__enter__ = lambda s: s
            mock_open.return_value.__exit__ = mock.Mock()
            mock_open.return_value.read.return_value = 'foo'
            lp = LocalChild(self.conf, FakeDualNetwork())
            self.assertIsNone(lp.pid)
    def test_pid_invalid_file(self):
        # A missing pid file also yields None.
        with mock.patch.object(LocalChild, 'get_conf_file_name') as conf_file:
            conf_file.return_value = '.doesnotexist/pid'
            lp = LocalChild(self.conf, FakeDualNetwork())
            self.assertIsNone(lp.pid)
    def test_get_interface_name(self):
        with mock.patch('__builtin__.open') as mock_open:
            mock_open.return_value.__enter__ = lambda s: s
            mock_open.return_value.__exit__ = mock.Mock()
            mock_open.return_value.read.return_value = 'tap0'
            lp = LocalChild(self.conf, FakeDualNetwork())
            self.assertEqual(lp.interface_name, 'tap0')
    def test_set_interface_name(self):
        # Setting the name persists it via the interface conf file.
        with mock.patch('neutron.agent.linux.utils.replace_file') as replace:
            lp = LocalChild(self.conf, FakeDualNetwork())
            with mock.patch.object(lp, 'get_conf_file_name') as conf_file:
                conf_file.return_value = '/interface'
                lp.interface_name = 'tap0'
                conf_file.assert_called_once_with('interface',
                                                  ensure_conf_dir=True)
                replace.assert_called_once_with(mock.ANY, 'tap0')
class TestDnsmasq(TestBase):
def _test_spawn(self, extra_options, network=None, max_leases=16777216):
    """Drive Dnsmasq.spawn_process and assert the exact command line.

    :param extra_options: trailing dnsmasq arguments the caller expects.
    :param network: fake network fixture; defaults to a fresh
        FakeDualNetwork per call.  (Previously the default was
        ``FakeDualNetwork()`` evaluated once at definition time — a
        mutable-default-argument bug that silently shared one fixture
        instance across every test using the default.)
    :param max_leases: value expected in ``--dhcp-lease-max``.
    """
    if network is None:
        network = FakeDualNetwork()

    def mock_get_conf_file_name(kind, ensure_conf_dir=False):
        return '/dhcp/%s/%s' % (network.id, kind)

    def fake_argv(index):
        if index == 0:
            return '/usr/local/bin/neutron-dhcp-agent'
        else:
            raise IndexError
    # The full dnsmasq invocation, run inside the network's namespace.
    expected = [
        'ip',
        'netns',
        'exec',
        'qdhcp-ns',
        'env',
        'NEUTRON_NETWORK_ID=%s' % network.id,
        'dnsmasq',
        '--no-hosts',
        '--no-resolv',
        '--strict-order',
        '--bind-interfaces',
        '--interface=tap0',
        '--except-interface=lo',
        '--pid-file=/dhcp/%s/pid' % network.id,
        '--dhcp-hostsfile=/dhcp/%s/host' % network.id,
        '--dhcp-optsfile=/dhcp/%s/opts' % network.id,
        '--leasefile-ro']
    # One --dhcp-range per subnet, tagged tag0, tag1, ...
    expected.extend(
        '--dhcp-range=set:tag%d,%s,static,86400s' %
        (i, s.cidr.split('/')[0])
        for i, s in enumerate(network.subnets)
    )
    expected.append('--dhcp-lease-max=%d' % max_leases)
    expected.extend(extra_options)
    self.execute.return_value = ('', '')
    attrs_to_mock = dict(
        [(a, mock.DEFAULT) for a in
         ['_output_opts_file', 'get_conf_file_name', 'interface_name']]
    )
    with mock.patch.multiple(dhcp.Dnsmasq, **attrs_to_mock) as mocks:
        mocks['get_conf_file_name'].side_effect = mock_get_conf_file_name
        mocks['_output_opts_file'].return_value = (
            '/dhcp/%s/opts' % network.id
        )
        mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
        with mock.patch.object(dhcp.sys, 'argv') as argv:
            argv.__getitem__.side_effect = fake_argv
            dm = dhcp.Dnsmasq(self.conf, network, version=float(2.59))
            dm.spawn_process()
            self.assertTrue(mocks['_output_opts_file'].called)
            self.execute.assert_called_once_with(expected,
                                                 root_helper='sudo',
                                                 check_exit_code=True)
def test_spawn(self):
    # Default config: empty --conf-file plus the default dhcp domain.
    self._test_spawn(['--conf-file=', '--domain=openstacklocal'])
def test_spawn_cfg_config_file(self):
    # A configured dnsmasq_config_file appears in --conf-file.
    self.conf.set_override('dnsmasq_config_file', '/foo')
    self._test_spawn(['--conf-file=/foo', '--domain=openstacklocal'])
def test_spawn_no_dhcp_domain(self):
    # An empty dhcp_domain suppresses the --domain argument entirely.
    self.conf.set_override('dhcp_domain', '')
    self._test_spawn(['--conf-file='])
def test_spawn_cfg_dns_server(self):
    # A configured upstream DNS server adds one --server argument.
    self.conf.set_override('dnsmasq_dns_servers', ['8.8.8.8'])
    self._test_spawn(['--conf-file=',
                      '--server=8.8.8.8',
                      '--domain=openstacklocal'])
def test_spawn_cfg_multiple_dns_server(self):
    # Multiple upstream DNS servers yield one --server each, in order.
    self.conf.set_override('dnsmasq_dns_servers', ['8.8.8.8',
                                                   '9.9.9.9'])
    self._test_spawn(['--conf-file=',
                      '--server=8.8.8.8',
                      '--server=9.9.9.9',
                      '--domain=openstacklocal'])
def test_spawn_max_leases_is_smaller_than_cap(self):
    # A single /24 network caps --dhcp-lease-max at 256.
    self._test_spawn(
        ['--conf-file=', '--domain=openstacklocal'],
        network=FakeV4Network(),
        max_leases=256)
def test_output_opts_file(self):
    # Expected opts file: per-tag DNS servers, classless static routes
    # (option 121 plus the Microsoft 249 alias) and the default router.
    fake_v6 = 'gdca:3ba5:a17a:4ba3::1'
    fake_v6_cidr = 'gdca:3ba5:a17a:4ba3::/64'
    expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1
tag:tag1,option:dns-server,%s
tag:tag1,option:classless-static-route,%s,%s
tag:tag1,249,%s,%s""".lstrip() % (fake_v6,
                                  fake_v6_cidr, fake_v6,
                                  fake_v6_cidr, fake_v6)
    with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
        conf_fn.return_value = '/foo/opts'
        dm = dhcp.Dnsmasq(self.conf, FakeDualNetwork(),
                          version=float(2.59))
        dm._output_opts_file()
        self.safe.assert_called_once_with('/foo/opts', expected)
    def test_output_opts_file_gateway_route(self):
        """When the gateway is on-subnet, no classless static route is added
        for the IPv4 subnet — only dns-server and the router option."""
        fake_v6 = 'gdca:3ba5:a17a:4ba3::1'
        fake_v6_cidr = 'gdca:3ba5:a17a:4ba3::/64'
        expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:router,10.0.0.1
tag:tag1,option:dns-server,%s
tag:tag1,option:classless-static-route,%s,%s
tag:tag1,249,%s,%s""".lstrip() % (fake_v6,
                                  fake_v6_cidr, fake_v6,
                                  fake_v6_cidr, fake_v6)
        with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
            conf_fn.return_value = '/foo/opts'
            dm = dhcp.Dnsmasq(self.conf, FakeDualNetworkGatewayRoute(),
                              version=float(2.59))
            dm._output_opts_file()
        self.safe.assert_called_once_with('/foo/opts', expected)
    def test_output_opts_file_multiple_agents_without_dns_provided(self):
        """With multiple DHCP agents and no subnet DNS servers, the agents'
        own port IPs are advertised as dns-server entries."""
        expected = """
tag:tag0,option:router,192.168.0.1
tag:tag0,option:dns-server,192.168.0.5,192.168.0.6""".lstrip()
        with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
            conf_fn.return_value = '/foo/opts'
            dm = dhcp.Dnsmasq(self.conf,
                              FakeV4MultipleAgentsWithoutDnsProvided(),
                              version=float(2.59))
            dm._output_opts_file()
        self.safe.assert_called_once_with('/foo/opts', expected)
    def test_output_opts_file_multiple_agents_with_dns_provided(self):
        """With multiple DHCP agents but explicit subnet DNS servers, only the
        provided dns-server is emitted — agent port IPs are not used."""
        expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:router,192.168.0.1""".lstrip()
        with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
            conf_fn.return_value = '/foo/opts'
            dm = dhcp.Dnsmasq(self.conf,
                              FakeV4MultipleAgentsWithDnsProvided(),
                              version=float(2.59))
            dm._output_opts_file()
        self.safe.assert_called_once_with('/foo/opts', expected)
    def test_output_opts_file_single_dhcp(self):
        """On a dual network with DHCP enabled on only one subnet, options are
        emitted for that (IPv4) subnet alone under tag0."""
        expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1""".lstrip()
        with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
            conf_fn.return_value = '/foo/opts'
            dm = dhcp.Dnsmasq(self.conf, FakeDualNetworkSingleDHCP(),
                              version=float(2.59))
            dm._output_opts_file()
        self.safe.assert_called_once_with('/foo/opts', expected)
    def test_output_opts_file_single_dhcp_ver2_48(self):
        """Same as test_output_opts_file_single_dhcp, but for dnsmasq 2.48:
        tags are written bare ('tag0,...') without the 'tag:' prefix."""
        expected = """
tag0,option:dns-server,8.8.8.8
tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag0,249,20.0.0.1/24,20.0.0.1
tag0,option:router,192.168.0.1""".lstrip()
        with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
            conf_fn.return_value = '/foo/opts'
            dm = dhcp.Dnsmasq(self.conf, FakeDualNetworkSingleDHCP(),
                              version=float(2.48))
            dm._output_opts_file()
        self.safe.assert_called_once_with('/foo/opts', expected)
    def test_output_opts_file_no_gateway(self):
        """With no subnet gateway, a metadata (169.254.169.254) route via the
        agent's own interface IP is written and option:router is left empty."""
        expected = """
tag:tag0,option:classless-static-route,169.254.169.254/32,192.168.1.1
tag:tag0,249,169.254.169.254/32,192.168.1.1
tag:tag0,option:router""".lstrip()
        with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
            conf_fn.return_value = '/foo/opts'
            dm = dhcp.Dnsmasq(self.conf, FakeV4NoGatewayNetwork(),
                              version=float(2.59))
            # the interface IP map supplies the metadata route's next hop
            with mock.patch.object(dm, '_make_subnet_interface_ip_map') as ipm:
                ipm.return_value = {FakeV4SubnetNoGateway.id: '192.168.1.1'}
                dm._output_opts_file()
                self.assertTrue(ipm.called)
        self.safe.assert_called_once_with('/foo/opts', expected)
    def test_output_opts_file_no_neutron_router_on_subnet(self):
        """With a gateway but no neutron router port on the subnet, the
        metadata route goes via the agent interface IP while option:router
        still advertises the configured gateway."""
        expected = """
tag:tag0,option:classless-static-route,169.254.169.254/32,192.168.1.2
tag:tag0,249,169.254.169.254/32,192.168.1.2
tag:tag0,option:router,192.168.1.1""".lstrip()
        with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
            conf_fn.return_value = '/foo/opts'
            dm = dhcp.Dnsmasq(self.conf, FakeV4NetworkNoRouter(),
                              version=float(2.59))
            with mock.patch.object(dm, '_make_subnet_interface_ip_map') as ipm:
                ipm.return_value = {FakeV4SubnetNoRouter.id: '192.168.1.2'}
                dm._output_opts_file()
                self.assertTrue(ipm.called)
        self.safe.assert_called_once_with('/foo/opts', expected)
    def test_output_opts_file_pxe_2port_1net(self):
        """PXE extra-dhcp-opts on two ports of one network are emitted under
        per-port tags (the port UUIDs), with identical boot details."""
        expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:tftp-server,192.168.0.3
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:server-ip-address,192.168.0.2
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:bootfile-name,pxelinux.0
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:tftp-server,192.168.0.3
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:server-ip-address,192.168.0.2
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:bootfile-name,pxelinux.0"""
        expected = expected.lstrip()
        with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
            conf_fn.return_value = '/foo/opts'
            fp = FakeV4NetworkPxe2Ports()
            dm = dhcp.Dnsmasq(self.conf, fp, version=float(2.59))
            dm._output_opts_file()
        self.safe.assert_called_once_with('/foo/opts', expected)
    def test_output_opts_file_pxe_2port_1net_diff_details(self):
        """Two PXE ports on one network with differing boot servers keep their
        own tftp/server-ip values under their own port tags."""
        expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:tftp-server,192.168.0.3
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:server-ip-address,192.168.0.2
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:bootfile-name,pxelinux.0
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:tftp-server,192.168.0.5
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:server-ip-address,192.168.0.5
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:bootfile-name,pxelinux.0"""
        expected = expected.lstrip()
        with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
            conf_fn.return_value = '/foo/opts'
            dm = dhcp.Dnsmasq(self.conf, FakeV4NetworkPxe2Ports("portsDiff"),
                              version=float(2.59))
            dm._output_opts_file()
        self.safe.assert_called_once_with('/foo/opts', expected)
    def test_output_opts_file_pxe_3port_1net_diff_details(self):
        """Three PXE ports on one network, each with distinct boot details,
        produce three independent per-port tag groups."""
        expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:tftp-server,192.168.0.3
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:server-ip-address,192.168.0.2
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:bootfile-name,pxelinux.0
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:tftp-server,192.168.0.5
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:server-ip-address,192.168.0.5
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:bootfile-name,pxelinux2.0
tag:44444444-4444-4444-4444-444444444444,option:tftp-server,192.168.0.7
tag:44444444-4444-4444-4444-444444444444,option:server-ip-address,192.168.0.7
tag:44444444-4444-4444-4444-444444444444,option:bootfile-name,pxelinux3.0"""
        expected = expected.lstrip()
        with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
            conf_fn.return_value = '/foo/opts'
            dm = dhcp.Dnsmasq(self.conf,
                              FakeV4NetworkPxe3Ports("portsDifferent"),
                              version=float(2.59))
            dm._output_opts_file()
        self.safe.assert_called_once_with('/foo/opts', expected)
    def test_output_opts_file_pxe_3port_2net(self):
        """PXE ports spread over two subnets get boot servers appropriate to
        their own subnet (192.168.0.x vs 192.168.1.x)."""
        expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:tftp-server,192.168.0.3
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:server-ip-address,192.168.0.2
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:bootfile-name,pxelinux.0
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:tftp-server,192.168.1.3
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:server-ip-address,192.168.1.2
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:bootfile-name,pxelinux2.0
tag:44444444-4444-4444-4444-444444444444,option:tftp-server,192.168.1.3
tag:44444444-4444-4444-4444-444444444444,option:server-ip-address,192.168.1.2
tag:44444444-4444-4444-4444-444444444444,option:bootfile-name,pxelinux3.0"""
        expected = expected.lstrip()
        with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
            conf_fn.return_value = '/foo/opts'
            dm = dhcp.Dnsmasq(self.conf, FakeDualV4Pxe3Ports(),
                              version=float(2.59))
            dm._output_opts_file()
        self.safe.assert_called_once_with('/foo/opts', expected)
    def test_reload_allocations(self):
        """reload_allocations rewrites the host and opts files and, since the
        dnsmasq process is active (pid 5), sends it `kill -HUP 5` instead of
        respawning."""
        exp_host_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host'
        exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal,'
                         '192.168.0.2\n'
                         '00:00:f3:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--2.'
                         'openstacklocal,[fdca:3ba5:a17a:4ba3::2]\n'
                         '00:00:0f:aa:bb:cc,host-192-168-0-3.openstacklocal,'
                         '192.168.0.3\n'
                         '00:00:0f:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--3.'
                         'openstacklocal,[fdca:3ba5:a17a:4ba3::3]\n'
                         '00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal,'
                         '192.168.0.1\n').lstrip()
        exp_opt_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/opts'
        # NOTE(review): this first assignment is dead — it is immediately
        # overwritten by the full expected data below.
        exp_opt_data = "tag:tag0,option:router,192.168.0.1"
        fake_v6 = 'gdca:3ba5:a17a:4ba3::1'
        fake_v6_cidr = 'gdca:3ba5:a17a:4ba3::/64'
        exp_opt_data = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1
tag:tag1,option:dns-server,%s
tag:tag1,option:classless-static-route,%s,%s
tag:tag1,249,%s,%s""".lstrip() % (fake_v6,
                                  fake_v6_cidr, fake_v6,
                                  fake_v6_cidr, fake_v6)
        exp_args = ['kill', '-HUP', 5]
        with mock.patch('os.path.isdir') as isdir:
            isdir.return_value = True
            # 'active' and 'pid' are properties, so patch their __get__
            with mock.patch.object(dhcp.Dnsmasq, 'active') as active:
                active.__get__ = mock.Mock(return_value=True)
                with mock.patch.object(dhcp.Dnsmasq, 'pid') as pid:
                    pid.__get__ = mock.Mock(return_value=5)
                    dm = dhcp.Dnsmasq(self.conf, FakeDualNetwork(),
                                      version=float(2.59))
                    method_name = '_make_subnet_interface_ip_map'
                    with mock.patch.object(dhcp.Dnsmasq,
                                           method_name) as ip_map:
                        ip_map.return_value = {}
                        dm.reload_allocations()
                        self.assertTrue(ip_map.called)
        self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data),
                                    mock.call(exp_opt_name, exp_opt_data)])
        self.execute.assert_called_once_with(exp_args, 'sudo')
def test_reload_allocations_stale_pid(self):
exp_host_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host'
exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal,'
'192.168.0.2\n'
'00:00:f3:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--2.'
'openstacklocal,[fdca:3ba5:a17a:4ba3::2]\n'
'00:00:0f:aa:bb:cc,host-192-168-0-3.openstacklocal,'
'192.168.0.3\n'
'00:00:0f:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--3.'
'openstacklocal,[fdca:3ba5:a17a:4ba3::3]\n'
'00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal,'
'192.168.0.1\n').lstrip()
exp_host_data.replace('\n', '')
exp_opt_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/opts'
exp_opt_data = "tag:tag0,option:router,192.168.0.1"
fake_v6 = 'gdca:3ba5:a17a:4ba3::1'
fake_v6_cidr = 'gdca:3ba5:a17a:4ba3::/64'
exp_opt_data = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1
tag:tag1,option:dns-server,%s
tag:tag1,option:classless-static-route,%s,%s
tag:tag1,249,%s,%s""".lstrip() % (fake_v6,
fake_v6_cidr, fake_v6,
fake_v6_cidr, fake_v6)
with mock.patch('__builtin__.open') as mock_open:
mock_open.return_value.__enter__ = lambda s: s
mock_open.return_value.__exit__ = mock.Mock()
mock_open.return_value.readline.return_value = None
with mock.patch('os.path.isdir') as isdir:
isdir.return_value = True
with mock.patch.object(dhcp.Dnsmasq, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=5)
dm = dhcp.Dnsmasq(self.conf, FakeDualNetwork(),
version=float(2.59))
method_name = '_make_subnet_interface_ip_map'
with mock.patch.object(dhcp.Dnsmasq, method_name) as ipmap:
ipmap.return_value = {}
dm.reload_allocations()
self.assertTrue(ipmap.called)
self.safe.assert_has_calls([mock.call(exp_host_name,
exp_host_data),
mock.call(exp_opt_name, exp_opt_data)])
mock_open.assert_called_once_with('/proc/5/cmdline', 'r')
def test_release_unused_leases(self):
dnsmasq = dhcp.Dnsmasq(self.conf, FakeDualNetwork())
ip1 = '192.168.1.2'
mac1 = '00:00:80:aa:bb:cc'
ip2 = '192.168.1.3'
mac2 = '00:00:80:cc:bb:aa'
old_leases = set([(ip1, mac1), (ip2, mac2)])
dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases)
dnsmasq._output_hosts_file = mock.Mock()
dnsmasq._release_lease = mock.Mock()
dnsmasq.network.ports = []
dnsmasq._release_unused_leases()
dnsmasq._release_lease.assert_has_calls([mock.call(mac1, ip1),
mock.call(mac2, ip2)],
any_order=True)
def test_release_unused_leases_one_lease(self):
dnsmasq = dhcp.Dnsmasq(self.conf, FakeDualNetwork())
ip1 = '192.168.0.2'
mac1 = '00:00:80:aa:bb:cc'
ip2 = '192.168.0.3'
mac2 = '00:00:80:cc:bb:aa'
old_leases = set([(ip1, mac1), (ip2, mac2)])
dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases)
dnsmasq._output_hosts_file = mock.Mock()
dnsmasq._release_lease = mock.Mock()
dnsmasq.network.ports = [FakePort1()]
dnsmasq._release_unused_leases()
dnsmasq._release_lease.assert_has_calls([mock.call(mac2, ip2)],
any_order=True)
def test_read_hosts_file_leases(self):
filename = '/path/to/file'
with mock.patch('os.path.exists') as mock_exists:
mock_exists.return_value = True
with mock.patch('__builtin__.open') as mock_open:
mock_open.return_value.__enter__ = lambda s: s
mock_open.return_value.__exit__ = mock.Mock()
lines = ["00:00:80:aa:bb:cc,inst-name,192.168.0.1"]
mock_open.return_value.readlines.return_value = lines
dnsmasq = dhcp.Dnsmasq(self.conf, FakeDualNetwork())
leases = dnsmasq._read_hosts_file_leases(filename)
self.assertEqual(set([("192.168.0.1", "00:00:80:aa:bb:cc")]), leases)
mock_exists.assert_called_once_with(filename)
mock_open.assert_called_once_with(filename)
def test_make_subnet_interface_ip_map(self):
with mock.patch('neutron.agent.linux.ip_lib.IPDevice') as ip_dev:
ip_dev.return_value.addr.list.return_value = [
{'cidr': '192.168.0.1/24'}
]
dm = dhcp.Dnsmasq(self.conf,
FakeDualNetwork())
self.assertEqual(
dm._make_subnet_interface_ip_map(),
{FakeV4Subnet.id: '192.168.0.1'}
)
def test_remove_config_files(self):
net = FakeV4Network()
path = '/opt/data/neutron/dhcp'
self.conf.dhcp_confs = path
with mock.patch('shutil.rmtree') as rmtree:
lp = LocalChild(self.conf, net)
lp._remove_config_files()
rmtree.assert_called_once_with(os.path.join(path, net.id),
ignore_errors=True)
    def test_existing_dhcp_networks(self):
        """existing_dhcp_networks returns only UUID-like conf dir names,
        regardless of whether their dnsmasq process is alive."""
        path = '/opt/data/neutron/dhcp'
        self.conf.dhcp_confs = path
        cases = {
            # network_uuid --> is_dhcp_alive?
            'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa': True,
            'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb': False,
            'not_uuid_like_name': True
        }
        def active_fake(self, instance, cls):
            # descriptor-protocol replacement for the 'active' property
            return cases[instance.network.id]
        with mock.patch('os.listdir') as mock_listdir:
            with mock.patch.object(dhcp.Dnsmasq, 'active') as mock_active:
                mock_active.__get__ = active_fake
                mock_listdir.return_value = cases.keys()
                result = dhcp.Dnsmasq.existing_dhcp_networks(self.conf, 'sudo')
                mock_listdir.assert_called_once_with(path)
                # 'not_uuid_like_name' is filtered out even though "alive"
                self.assertEqual(['aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                                  'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'],
                                 result)
def _check_version(self, cmd_out, expected_value):
with mock.patch('neutron.agent.linux.utils.execute') as cmd:
cmd.return_value = cmd_out
result = dhcp.Dnsmasq.check_version()
self.assertEqual(result, expected_value)
def test_check_minimum_version(self):
self._check_version('Dnsmasq version 2.59 Copyright (c)...',
float(2.59))
def test_check_future_version(self):
self._check_version('Dnsmasq version 2.65 Copyright (c)...',
float(2.65))
def test_check_fail_version(self):
self._check_version('Dnsmasq version 2.48 Copyright (c)...',
float(2.48))
def test_check_version_failed_cmd_execution(self):
self._check_version('Error while executing command', 0)
|
|
import ee, getpass, time, math, sys
from flask import Flask, render_template, request
from eeMad import imad
from eeWishart import omnibus
ee.Initialize()  # authenticate and open the Earth Engine API session at import time
app = Flask(__name__, static_url_path='/static')  # static assets served under /static
def simon(path):
    """Return the fixed test set of Sentinel-1 scenes under *path*, each
    converted with the S1.dB algorithm."""
    scene_names = [
        'S1A_IW_GRDH_1SDV_20160305T171543_20160305T171608_010237_00F1FA_49DC',
        'S1A_IW_GRDH_1SDV_20160329T171543_20160329T171608_010587_00FBF9_B4DE',
        'S1A_IW_GRDH_1SDV_20160410T171538_20160410T171603_010762_010122_CEF6',
        'S1A_IW_GRDH_1SDV_20160422T171539_20160422T171604_010937_010677_03F6',
        'S1A_IW_GRDH_1SDV_20160504T171539_20160504T171604_011112_010BED_80AF',
        'S1A_IW_GRDH_1SDV_20160516T171540_20160516T171605_011287_011198_FC21',
        'S1A_IW_GRDH_1SDV_20160528T171603_20160528T171628_011462_011752_F570',
        'S1A_IW_GRDH_1SDV_20160609T171604_20160609T171629_011637_011CD1_C2F5',
        'S1A_IW_GRDH_1SDV_20160715T171605_20160715T171630_012162_012DA2_95A1',
        'S1A_IW_GRDH_1SDV_20160727T171606_20160727T171631_012337_013359_29A6',
        'S1A_IW_GRDH_1SDV_20160808T171607_20160808T171632_012512_01392E_44C4',
        'S1A_IW_GRDH_1SDV_20160901T171608_20160901T171633_012862_0144E3_30E5',
        'S1A_IW_GRDH_1SDV_20160925T171609_20160925T171634_013212_015050_8FDB',
        'S1B_IW_GRDH_1SDV_20161001T171508_20161001T171533_002316_003E9D_D195',
        'S1A_IW_GRDH_1SDV_20161007T171609_20161007T171634_013387_0155CD_F513',
        'S1A_IW_GRDH_1SDV_20161019T171609_20161019T171634_013562_015B60_27FF',
        'S1A_IW_GRDH_1SDV_20161031T171609_20161031T171634_013737_0160BD_4FAE',
    ]
    images = ee.List(
        [ee.call("S1.dB", ee.Image(path + name)) for name in scene_names])
    return ee.ImageCollection(images)
def simonf(path):
    """Return the fixed test set of raw Sentinel-1 scenes under *path*,
    reduced to their VV and VH bands."""
    scene_names = [
        'S1A_IW_GRDH_1SDV_20160305T171543_20160305T171608_010237_00F1FA_49DC',
        'S1A_IW_GRDH_1SDV_20160329T171543_20160329T171608_010587_00FBF9_B4DE',
        'S1A_IW_GRDH_1SDV_20160410T171538_20160410T171603_010762_010122_CEF6',
        'S1A_IW_GRDH_1SDV_20160422T171539_20160422T171604_010937_010677_03F6',
        'S1A_IW_GRDH_1SDV_20160504T171539_20160504T171604_011112_010BED_80AF',
        'S1A_IW_GRDH_1SDV_20160516T171540_20160516T171605_011287_011198_FC21',
        'S1A_IW_GRDH_1SDV_20160528T171603_20160528T171628_011462_011752_F570',
        'S1A_IW_GRDH_1SDV_20160609T171604_20160609T171629_011637_011CD1_C2F5',
        'S1A_IW_GRDH_1SDV_20160715T171605_20160715T171630_012162_012DA2_95A1',
        'S1A_IW_GRDH_1SDV_20160727T171606_20160727T171631_012337_013359_29A6',
        'S1A_IW_GRDH_1SDV_20160808T171607_20160808T171632_012512_01392E_44C4',
        'S1A_IW_GRDH_1SDV_20160901T171608_20160901T171633_012862_0144E3_30E5',
        'S1A_IW_GRDH_1SDV_20160925T171609_20160925T171634_013212_015050_8FDB',
        'S1B_IW_GRDH_1SDV_20161001T171508_20161001T171533_002316_003E9D_D195',
        'S1A_IW_GRDH_1SDV_20161007T171609_20161007T171634_013387_0155CD_F513',
        'S1A_IW_GRDH_1SDV_20161019T171609_20161019T171634_013562_015B60_27FF',
        'S1A_IW_GRDH_1SDV_20161031T171609_20161031T171634_013737_0160BD_4FAE',
    ]
    def sel(image):
        # keep only the dual-pol bands
        return ee.Image(image).select(['VV', 'VH'])
    images = ee.List([ee.Image(path + name) for name in scene_names])
    return ee.ImageCollection(images.map(sel))
@app.route('/')
def index():
    """Serve the landing page straight from the static folder."""
    return app.send_static_file('index.html')
def get_vv(image):
    """Select the 'VV' band and convert its dB values back to linear scale."""
    db_to_linear = ee.Image.constant(math.log(10.0) / 10.0)
    return image.select('VV').multiply(db_to_linear).exp()
def get_vh(image):
    """Select the 'VH' band and convert its dB values back to linear scale."""
    db_to_linear = ee.Image.constant(math.log(10.0) / 10.0)
    return image.select('VH').multiply(db_to_linear).exp()
def get_vvvh(image):
    """Select the 'VV' and 'VH' bands and convert dB values back to linear."""
    db_to_linear = ee.Image.constant(math.log(10.0) / 10.0)
    return image.select('VV', 'VH').multiply(db_to_linear).exp()
def get_vvvh_raw(image):
    """Select the 'VV' and 'VH' bands without any rescaling."""
    return image.select('VV', 'VH')
def get_image(current, image):
    """Iterator callback: stack *image*'s bands onto the accumulator."""
    return ee.Image.cat(ee.Image(image), current)
def clipList(current, prev):
    """Iterator callback: clip *current* to the carried rectangle and append
    it to the carried image list."""
    state = ee.Dictionary(prev)
    rect = state.get('rect')
    clipped = ee.Image(current).clip(rect)
    imlist = ee.List(state.get('imlist')).add(clipped)
    return ee.Dictionary({'imlist': imlist, 'rect': rect})
@app.route('/sentinel1.html', methods = ['GET', 'POST'])
def Sentinel1():
    """Render the Sentinel-1 query form (GET) or run a query (POST).

    POST supports two modes selected by the 'how' field:
      * 'longlat' — first scene whose footprint contains a point;
      * 'box'     — all scenes overlapping a lat/lon rectangle, optionally
        filtered by relative orbit number and assembled into one composite.
    Either branch may also kick off an export to Google Drive.
    On any failure the exception text is returned as an HTML fragment.
    """
    if request.method == 'GET':
        username = getpass.getuser()
        return render_template('sentinel1.html', navbar = 'Hi there %s!'%username,
                                                 centerlon = 8.5,
                                                 centerlat = 50.05)
    else:
        try:
            startdate = request.form['startdate']
            enddate = request.form['enddate']
            latitude = float(request.form['latitude'])
            longitude = float(request.form['longitude'])
            orbit = request.form['orbit']
            polarization1 = request.form['polarization']
            relativeorbitnumber = request.form['relativeorbitnumber']
            # dual-pol requests become a list for the EE metadata filter
            if polarization1 == 'VV,VH':
                polarization = ['VV','VH']
            else:
                polarization = polarization1
            mode = request.form['mode']
            minLat = float(request.form['minLat'])
            minLon = float(request.form['minLon'])
            maxLat = float(request.form['maxLat'])
            maxLon = float(request.form['maxLon'])
            how = request.form['how']
            # 'in' membership works on request.form in Python 2 and 3;
            # has_key() is Python-2-only and was removed in Python 3
            if 'export' in request.form:
                export = request.form['export']
            else:
                export = 'none'
            exportname = request.form['exportname']
            start = ee.Date(startdate)
            finish = ee.Date(enddate)
            if how == 'longlat':
                point = ee.Geometry.Point([longitude,latitude])
                collection = ee.ImageCollection('COPERNICUS/S1_GRD') \
                            .filterBounds(point) \
                            .filterDate(start, finish) \
                            .filter(ee.Filter.eq('transmitterReceiverPolarisation', polarization)) \
                            .filter(ee.Filter.eq('instrumentMode', mode)) \
                            .filter(ee.Filter.eq('resolution_meters', 10)) \
                            .filter(ee.Filter.eq('orbitProperties_pass', orbit))
                count = collection.toList(100).length().getInfo()
                if count==0:
                    raise ValueError('No images found')
                image = ee.Image(collection.first())
                timestamp = ee.Date(image.get('system:time_start')).getInfo()
                timestamp = time.gmtime(int(timestamp['value'])/1000)
                timestamp = time.strftime('%c', timestamp)
                systemid = image.get('system:id').getInfo()
                if export == 'export':
                    # export to Google Drive --------------------------
                    gdexport = ee.batch.Export.image(image,exportname,
                                  {'scale':10,'driveFolder':'EarthEngineImages','maxPixels': 1e9})
                    gdexportid = str(gdexport.id)
                    # sys.stderr.write replaces the Python-2-only
                    # `print >> sys.stderr` statement
                    sys.stderr.write('****Exporting to Google Drive, task id: %s \n'%gdexportid)
                    gdexport.start()
                else:
                    gdexportid = 'none'
                # --------------------------------------------------
                if (polarization1 == 'VV') or (polarization1 == 'VV,VH'):
                    projection = image.select('VV').projection().getInfo()['crs']
                else:
                    projection = image.select('VH').projection().getInfo()['crs']
                downloadpath = image.getDownloadUrl({'scale':1000})
                im = get_vv(image)
                mapid = im.getMapId({'min':0, 'max':1, 'opacity': 0.5})
                return render_template('sentinel1out.html',
                                        mapidclip = mapid['mapid'],
                                        tokenclip = mapid['token'],
                                        mapid = mapid['mapid'],
                                        token = mapid['token'],
                                        centerlon = longitude,
                                        centerlat = latitude,
                                        downloadtext = '',
                                        downloadpath = downloadpath,
                                        downloadpathclip = downloadpath,
                                        polarization = polarization1,
                                        projection = projection,
                                        gdexportid = gdexportid,
                                        systemid = systemid,
                                        count = count,
                                        timestamp = timestamp)
            elif how=='box':
                # overlaps box
                rect = ee.Geometry.Rectangle(minLon,minLat,maxLon,maxLat)
                centerlon = (minLon + maxLon)/2.0
                centerlat = (minLat + maxLat)/2.0
                ulPoint = ee.Geometry.Point([minLon,maxLat])
                lrPoint = ee.Geometry.Point([maxLon,minLat])
                collection = ee.ImageCollection('COPERNICUS/S1_GRD') \
                            .filterBounds(ulPoint) \
                            .filterBounds(lrPoint) \
                            .filterDate(start, finish) \
                            .filter(ee.Filter.eq('transmitterReceiverPolarisation', polarization)) \
                            .filter(ee.Filter.eq('resolution_meters', 10)) \
                            .filter(ee.Filter.eq('instrumentMode', mode)) \
                            .filter(ee.Filter.eq('orbitProperties_pass', orbit))
                if relativeorbitnumber != 'ANY':
                    collection = collection.filter(ee.Filter.eq('relativeOrbitNumber_start', int(relativeorbitnumber)))
                collection = collection.sort('system:time_start')
                system_ids = ee.List(collection.aggregate_array('system:id'))
                systemids = str(list(system_ids.getInfo()))
                acquisition_times = ee.List(collection.aggregate_array('system:time_start'))
                count = acquisition_times.length().getInfo()
                if count==0:
                    raise ValueError('No images found')
                timestamplist = [time.strftime('%c', time.gmtime(int(t)/1000))
                                 for t in acquisition_times.getInfo()]
                timestamp = timestamplist[0]
                timestamps = str(timestamplist)
                relative_orbit_numbers = ee.List(collection.aggregate_array('relativeOrbitNumber_start'))
                relativeorbitnumbers = str(list(relative_orbit_numbers.getInfo()))
                image = ee.Image(collection.first())
                systemid = image.get('system:id').getInfo()
                if (polarization1 == 'VV') or (polarization1 == 'VV,VH'):
                    projection = image.select('VV').projection().getInfo()['crs']
                else:
                    projection = image.select('VH').projection().getInfo()['crs']
                # make into collection of VV, VH or VVVH images and restore linear scale
                if polarization == 'VV':
                    pcollection = collection.map(get_vv)
                elif polarization == 'VH':
                    pcollection = collection.map(get_vh)
                else:
                    pcollection = collection.map(get_vvvh)
                # clipped image for display on map
                image1 = ee.Image(pcollection.first())
                image1clip = image1.clip(rect)
                downloadpath = image1.getDownloadUrl({'scale':30})
                # clip the image collection and create a single multiband image
                compositeimage = ee.Image(pcollection.iterate(get_image,image1clip))
                if export == 'export':
                    # export to Google Drive --------------------------
                    gdexport = ee.batch.Export.image(compositeimage,exportname,
                                  {'scale':10,'driveFolder':'EarthEngineImages','maxPixels': 1e9})
                    gdexportid = str(gdexport.id)
                    sys.stderr.write('****Exporting to Google Drive, task id: %s \n'%gdexportid)
                    gdexport.start()
                else:
                    gdexportid = 'none'
                # --------------------------------------------------
                downloadpathclip = compositeimage.getDownloadUrl({'scale':10})
                if (polarization1 == 'VV') or (polarization1 == 'VV,VH'):
                    mapid = image1.select('VV').getMapId({'min': 0, 'max':1, 'opacity': 0.6})
                    mapidclip = image1clip.select('VV').getMapId({'min': 0, 'max':1, 'opacity': 0.7})
                else:
                    mapid = image1.select('VH').getMapId({'min': 0, 'max':1, 'opacity': 0.6})
                    mapidclip = image1clip.select('VH').getMapId({'min': 0, 'max':1, 'opacity': 0.7})
                return render_template('sentinel1out.html',
                                        mapidclip = mapidclip['mapid'],
                                        tokenclip = mapidclip['token'],
                                        mapid = mapid['mapid'],
                                        token = mapid['token'],
                                        centerlon = centerlon,
                                        centerlat = centerlat,
                                        downloadtext = 'Download image collection intersection',
                                        downloadpath = downloadpath,
                                        downloadpathclip = downloadpathclip,
                                        projection = projection,
                                        systemid = systemid,
                                        count = count,
                                        timestamp = timestamp,
                                        gdexportid = gdexportid,
                                        timestamps = timestamps,
                                        systemids = systemids,
                                        polarization = polarization1,
                                        relativeorbitnumbers = relativeorbitnumbers)
        except Exception as e:
            return '<br />An error occurred in Sentinel1: %s'%e
@app.route('/sentinel2.html', methods = ['GET', 'POST'])
def Sentinel2():
    """Render the Sentinel-2 query form (GET) or run a query (POST).

    POST supports 'longlat' (least-cloudy scene containing a point) and
    'box' (least-cloudy scene overlapping a lat/lon rectangle, previewed
    both full and clipped). Either branch may export to Google Drive.
    On any failure the exception text is returned as an HTML fragment.
    """
    if request.method == 'GET':
        username = getpass.getuser()
        return render_template('sentinel2.html', navbar = 'Hi there %s!'%username)
    else:
        try:
            startdate = request.form['startdate']
            enddate = request.form['enddate']
            desired_projection = request.form['projection']
            latitude = float(request.form['latitude'])
            longitude = float(request.form['longitude'])
            minLat = float(request.form['minLat'])
            minLon = float(request.form['minLon'])
            maxLat = float(request.form['maxLat'])
            maxLon = float(request.form['maxLon'])
            # 'in' membership works on request.form in Python 2 and 3;
            # has_key() is Python-2-only and was removed in Python 3
            if 'export' in request.form:
                export = request.form['export']
            else:
                export = ' '
            exportname = request.form['exportname']
            how = request.form['how']
            start = ee.Date(startdate)
            finish = ee.Date(enddate)
            if how == 'longlat':
                point = ee.Geometry.Point([longitude,latitude])
                elements = ee.ImageCollection('COPERNICUS/S2') \
                            .filterBounds(point) \
                            .filterDate(start, finish) \
                            .sort('CLOUD_COVERAGE_ASSESSMENT', True)
                count = elements.toList(100).length().getInfo()
                if count==0:
                    raise ValueError('No images found')
                element = elements.first()
                image = ee.Image(element)
                timestamp = ee.Date(image.get('system:time_start')).getInfo()
                timestamp = time.gmtime(int(timestamp['value'])/1000)
                timestamp = time.strftime('%c', timestamp)
                systemid = image.get('system:id').getInfo()
                cloudcover = image.get('CLOUD_COVERAGE_ASSESSMENT').getInfo()
                projection = image.select('B2').projection().getInfo()['crs']
                if desired_projection != 'default':
                    projection = desired_projection
                if export == 'export':
                    # export to Google Drive --------------------------
                    gdexport = ee.batch.Export.image(image,exportname,
                                  {'scale':10,'driveFolder':'EarthEngineImages','maxPixels': 1e9})
                    gdexportid = str(gdexport.id)
                    # sys.stderr.write replaces the Python-2-only
                    # `print >> sys.stderr` statement
                    sys.stderr.write('****Exporting to Google Drive, task id: %s \n'%gdexportid)
                    gdexport.start()
                else:
                    gdexportid = 'none'
                # --------------------------------------------------
                downloadpath = image.getDownloadUrl({'scale':30,'crs':projection})
                mapid = image.select('B2','B3','B4') \
                            .getMapId({'min': 0, 'max': 2000, 'opacity': 0.8})
                return render_template('sentinel2out.html',
                                        mapidclip = mapid['mapid'],
                                        tokenclip = mapid['token'],
                                        mapid = mapid['mapid'],
                                        token = mapid['token'],
                                        centerlon = longitude,
                                        centerlat = latitude,
                                        downloadtext = '',
                                        downloadpath = downloadpath,
                                        downloadpathclip = downloadpath,
                                        projection = projection,
                                        systemid = systemid,
                                        cloudcover = cloudcover,
                                        count = count,
                                        timestamp = timestamp)
            elif how=='box':
                # overlaps box
                rect = ee.Geometry.Rectangle(minLon,minLat,maxLon,maxLat)
                centerlon = (minLon + maxLon)/2.0
                centerlat = (minLat + maxLat)/2.0
                ulPoint = ee.Geometry.Point([minLon,maxLat])
                lrPoint = ee.Geometry.Point([maxLon,minLat])
                collection = ee.ImageCollection('COPERNICUS/S2') \
                            .filterBounds(ulPoint) \
                            .filterBounds(lrPoint) \
                            .filterDate(start, finish) \
                            .sort('CLOUD_COVERAGE_ASSESSMENT', True)
                count = collection.toList(100).length().getInfo()
                if count==0:
                    raise ValueError('No images found')
                image = ee.Image(collection.first())
                imageclip = image.clip(rect)
                timestamp = ee.Date(image.get('system:time_start')).getInfo()
                timestamp = time.gmtime(int(timestamp['value'])/1000)
                timestamp = time.strftime('%c', timestamp)
                systemid = image.get('system:id').getInfo()
                cloudcover = image.get('CLOUD_COVERAGE_ASSESSMENT').getInfo()
                projection = image.select('B2').projection().getInfo()['crs']
                if desired_projection != 'default':
                    projection = desired_projection
                downloadpath = image.getDownloadUrl({'scale':30,'crs':projection})
                if export == 'export':
                    # export to Google Drive --------------------------
                    gdexport = ee.batch.Export.image(imageclip.select('B2','B3','B4','B8'),exportname,
                                  {'scale':10,'driveFolder':'EarthEngineImages','maxPixels': 1e9})
                    gdexportid = str(gdexport.id)
                    sys.stderr.write('****Exporting to Google Drive, task id: %s \n'%gdexportid)
                    gdexport.start()
                else:
                    gdexportid = 'none'
                # --------------------------------------------------
                downloadpathclip = imageclip.select('B2','B3','B4','B8').getDownloadUrl({'scale':10, 'crs':projection})
                rgb = image.select('B2','B3','B4')
                # BUGFIX: previously selected B2,B3,B5 so the clipped preview
                # showed a different band combination than the full preview;
                # both now use the B2/B3/B4 RGB bands
                rgbclip = imageclip.select('B2','B3','B4')
                mapid = rgb.getMapId({'min':0, 'max':2000, 'opacity': 0.6})
                mapidclip = rgbclip.getMapId({'min':0, 'max':2000, 'opacity': 1.0})
                return render_template('sentinel2out.html',
                                        mapidclip = mapidclip['mapid'],
                                        tokenclip = mapidclip['token'],
                                        mapid = mapid['mapid'],
                                        token = mapid['token'],
                                        centerlon = centerlon,
                                        centerlat = centerlat,
                                        downloadtext = 'Download image intersection',
                                        downloadpath = downloadpath,
                                        downloadpathclip = downloadpathclip,
                                        projection = projection,
                                        systemid = systemid,
                                        cloudcover = cloudcover,
                                        count = count,
                                        timestamp = timestamp)
        except Exception as e:
            return '<br />An error occurred in Sentinel2: %s'%e
@app.route('/landsat5.html', methods = ['GET', 'POST'])
def Landsat5():
    """Flask view for browsing/exporting LANDSAT 5 (LT5_L1T) imagery.

    GET renders the query form. POST runs one of three searches selected
    by the ``how`` form field:

    - ``pathrow``: least-cloudy scene for a WRS-2 path/row and date range
    - ``longlat``: least-cloudy scene containing a lon/lat point
    - ``box``: least-cloudy scene overlapping a lat/lon rectangle, also
      clipped to that rectangle

    Optionally starts a Google Drive export task. Any failure is reported
    as an HTML error string (the whole POST branch is deliberately wrapped
    in a broad try/except).
    """
    if request.method == 'GET':
        username = getpass.getuser()
        return render_template('landsat5.html', navbar = 'Hi there %s!'%username)
    else:
        try:
            # Form parameters; some are only used by a subset of the
            # 'how' search modes below.
            startdate = request.form['startdate']
            enddate = request.form['enddate']
            path = int(request.form['path'])
            row = int(request.form['row'])
            latitude = float(request.form['latitude'])
            longitude = float(request.form['longitude'])
            minLat = float(request.form['minLat'])
            minLon = float(request.form['minLon'])
            maxLat = float(request.form['maxLat'])
            maxLon = float(request.form['maxLon'])
            how = request.form['how']
            # The 'export' checkbox is absent from the form unless ticked.
            if request.form.has_key('export'):
                export = request.form['export']
            else:
                export = ' '
            exportname = request.form['exportname']
            start = ee.Date(startdate)
            finish = ee.Date(enddate)
            if how == 'pathrow':
                # Least-cloudy scene for the given WRS-2 path/row.
                elements = ee.ImageCollection('LT5_L1T') \
                    .filterMetadata('WRS_PATH', 'equals', path) \
                    .filterMetadata('WRS_ROW', 'equals', row) \
                    .filterDate(start, finish) \
                    .sort('CLOUD_COVER', True)
                count = elements.toList(100).length().getInfo()
                if count==0:
                    raise ValueError('No images found')
                element = elements.first()
                image = ee.Image(element)
                # Center the map on the scene footprint (corner averages).
                longitude = (image.get('CORNER_LL_LON_PRODUCT').getInfo()+image.get('CORNER_UR_LON_PRODUCT').getInfo())/2
                latitude = (image.get('CORNER_UR_LAT_PRODUCT').getInfo()+image.get('CORNER_LL_LAT_PRODUCT').getInfo())/2
                timestamp = ee.Date(image.get('system:time_start')).getInfo()
                timestamp = time.gmtime(int(timestamp['value'])/1000)
                timestamp = time.strftime('%c', timestamp)
                systemid = image.get('system:id').getInfo()
                projection = image.select('B2').projection().getInfo()['crs']
                cloudcover = image.get('CLOUD_COVER').getInfo()
                if export == 'export':
                    # export to Google Drive --------------------------
                    gdexport = ee.batch.Export.image(image,exportname,
                        {'scale':30,'driveFolder':'EarthEngineImages','maxPixels': 1e9})
                    gdexportid = str(gdexport.id)
                    print >> sys.stderr, '****Exporting to Google Drive, task id: %s '%gdexportid
                    gdexport.start()
                else:
                    gdexportid = 'none'
                # --------------------------------------------------
                downloadpath = image.getDownloadUrl({'scale':30, 'crs':'EPSG:4326'})
                rgb = image.select('B4','B5','B7')
                mapid = rgb.getMapId({'min':0, 'max':250, 'opacity': 0.6})
                # No clipping in this mode: the clip slots reuse the whole
                # scene map so the template still gets all its variables.
                return render_template('landsat5out.html',
                                       mapidclip = mapid['mapid'],
                                       tokenclip = mapid['token'],
                                       mapid = mapid['mapid'],
                                       token = mapid['token'],
                                       centerlon = longitude,
                                       centerlat = latitude,
                                       downloadtext = '',
                                       downloadpath = downloadpath,
                                       downloadpathclip = downloadpath,
                                       projection = projection,
                                       systemid = systemid,
                                       cloudcover = cloudcover,
                                       count = count,
                                       timestamp = timestamp)
            elif how == 'longlat':
                # Least-cloudy scene whose footprint contains the point.
                point = ee.Geometry.Point([longitude,latitude])
                elements = ee.ImageCollection('LT5_L1T') \
                    .filterBounds(point) \
                    .filterDate(start, finish) \
                    .sort('CLOUD_COVER', True)
                count = elements.toList(100).length().getInfo()
                if count==0:
                    raise ValueError('No images found')
                element = elements.first()
                image = ee.Image(element)
                timestamp = ee.Date(image.get('system:time_start')).getInfo()
                timestamp = time.gmtime(int(timestamp['value'])/1000)
                timestamp = time.strftime('%c', timestamp)
                systemid = image.get('system:id').getInfo()
                cloudcover = image.get('CLOUD_COVER').getInfo()
                projection = image.select('B2').projection().getInfo()['crs']
                if export == 'export':
                    # export to Google Drive --------------------------
                    gdexport = ee.batch.Export.image(image,exportname,
                        {'scale':30,'driveFolder':'EarthEngineImages','maxPixels': 1e9})
                    gdexportid = str(gdexport.id)
                    print >> sys.stderr, '****Exporting to Google Drive, task id: %s '%gdexportid
                    gdexport.start()
                else:
                    gdexportid = 'none'
                # --------------------------------------------------
                downloadpath = image.getDownloadUrl({'scale':30,'crs':projection})
                mapid = image.select('B4','B5','B7') \
                             .getMapId({'min': 0, 'max': 250, 'opacity': 0.6})
                return render_template('landsat5out.html',
                                       mapidclip = mapid['mapid'],
                                       tokenclip = mapid['token'],
                                       mapid = mapid['mapid'],
                                       token = mapid['token'],
                                       centerlon = longitude,
                                       centerlat = latitude,
                                       downloadtext = '',
                                       downloadpath = downloadpath,
                                       downloadpathclip = downloadpath,
                                       projection = projection,
                                       systemid = systemid,
                                       # bug fix: was misspelled 'cloudcove',
                                       # so the template never received the
                                       # cloud cover in this branch
                                       cloudcover = cloudcover,
                                       count = count,
                                       timestamp = timestamp)
            elif how=='box':
                # Least-cloudy scene overlapping the box (both corners must
                # be inside the footprint); display/download is clipped.
                rect = ee.Geometry.Rectangle(minLon,minLat,maxLon,maxLat)
                centerlon = (minLon + maxLon)/2.0
                centerlat = (minLat + maxLat)/2.0
                ulPoint = ee.Geometry.Point([minLon,maxLat])
                lrPoint = ee.Geometry.Point([maxLon,minLat])
                collection = ee.ImageCollection('LT5_L1T') \
                    .filterBounds(ulPoint) \
                    .filterBounds(lrPoint) \
                    .filterDate(start, finish) \
                    .sort('CLOUD_COVER', True)
                count = collection.toList(100).length().getInfo()
                if count==0:
                    raise ValueError('No images found')
                image = ee.Image(collection.first())
                imageclip = image.clip(rect)
                timestamp = ee.Date(image.get('system:time_start')).getInfo()
                timestamp = time.gmtime(int(timestamp['value'])/1000)
                timestamp = time.strftime('%c', timestamp)
                systemid = image.get('system:id').getInfo()
                cloudcover = image.get('CLOUD_COVER').getInfo()
                projection = image.select('B1').projection().getInfo()['crs']
                downloadpath = image.getDownloadUrl({'scale':30,'crs':projection})
                if export == 'export':
                    # export to Google Drive --------------------------
                    gdexport = ee.batch.Export.image(imageclip,exportname,
                        {'scale':30,'driveFolder':'EarthEngineImages','maxPixels': 1e9})
                    gdexportid = str(gdexport.id)
                    print >> sys.stderr, '****Exporting to Google Drive, task id: %s '%gdexportid
                    gdexport.start()
                else:
                    gdexportid = 'none'
                # --------------------------------------------------
                downloadpathclip = imageclip.select('B1','B2','B3','B4','B5','B7').getDownloadUrl({'scale':30, 'crs':projection})
                rgb = image.select('B4','B5','B7')
                rgbclip = imageclip.select('B4','B5','B7')
                mapid = rgb.getMapId({'min':0, 'max':250, 'opacity': 0.6})
                mapidclip = rgbclip.getMapId({'min':0, 'max':250, 'opacity': 1.0})
                return render_template('landsat5out.html',
                                       mapidclip = mapidclip['mapid'],
                                       tokenclip = mapidclip['token'],
                                       mapid = mapid['mapid'],
                                       token = mapid['token'],
                                       centerlon = centerlon,
                                       centerlat = centerlat,
                                       downloadtext = 'Download image intersection',
                                       downloadpath = downloadpath,
                                       downloadpathclip = downloadpathclip,
                                       projection = projection,
                                       systemid = systemid,
                                       cloudcover = cloudcover,
                                       count = count,
                                       timestamp = timestamp)
        except Exception as e:
            return '<br />An error occurred in Landsat5: %s'%e
@app.route('/mad.html', methods = ['GET', 'POST'])
def Mad():
    """IR-MAD change detection between two LANDSAT 5 (LT5_L1T) scenes.

    GET renders the query form. POST picks the least-cloudy scene in each
    of two date intervals -- either by WRS path/row or by a bounding box
    (clipped) -- then runs ``niter`` reweighting iterations of imad() and
    renders the last MAD variate. Failures are returned as an HTML error
    string via the broad try/except.
    """
    if request.method == 'GET':
        username = getpass.getuser()
        return render_template('mad.html', navbar = 'Hi there %s!'%username)
    else:
        try:
            # ---- form parameters ------------------------------------
            path = int(request.form['path'])
            row = int(request.form['row'])
            niter = int(request.form['iterations'])
            start1 = ee.Date(request.form['startdate1'])
            finish1 = ee.Date(request.form['enddate1'])
            start2 = ee.Date(request.form['startdate2'])
            finish2 = ee.Date(request.form['enddate2'])
            minLat = float(request.form['minLat'])
            minLon = float(request.form['minLon'])
            maxLat = float(request.form['maxLat'])
            maxLon = float(request.form['maxLon'])
            exportname = request.form['exportname']
            how = request.form['how']
            # 'export' checkbox is only present in the form when ticked
            if request.form.has_key('export'):
                export = request.form['export']
            else:
                export = ' '
            if how == 'pathrow':
                # Least-cloudy scene in the first interval for the path/row.
                element = ee.ImageCollection('LT5_L1T') \
                .filterMetadata('WRS_PATH', 'equals', path) \
                .filterMetadata('WRS_ROW', 'equals', row) \
                .filterDate(start1, finish1) \
                .sort('CLOUD_COVER') \
                .first()
                image1 = ee.Image(element).select('B1','B2','B3','B4','B5','B7')
                timestamp1 = ee.Date(image1.get('system:time_start')).getInfo()
                timestamp1 = time.gmtime(int(timestamp1['value'])/1000)
                timestamp1 = time.strftime('%c', timestamp1)
                systemid1 = image1.get('system:id').getInfo()
                cloudcover1 = image1.get('CLOUD_COVER').getInfo()
                # map center = average of the scene corner coordinates
                centerlon = (image1.get('CORNER_LL_LON_PRODUCT').getInfo()+image1.get('CORNER_UR_LON_PRODUCT').getInfo())/2
                centerlat = (image1.get('CORNER_UR_LAT_PRODUCT').getInfo()+image1.get('CORNER_LL_LAT_PRODUCT').getInfo())/2
                # Least-cloudy scene in the second interval.
                # NOTE(review): unlike the 'box' branch there is no empty-
                # collection check here; an empty result presumably fails
                # later inside the try -- confirm.
                element = ee.ImageCollection('LT5_L1T') \
                .filterMetadata('WRS_PATH', 'equals', path) \
                .filterMetadata('WRS_ROW', 'equals', row) \
                .filterDate(start2, finish2) \
                .sort('CLOUD_COVER') \
                .first()
                image2 = ee.Image(element).select('B1','B2','B3','B4','B5','B7')
                timestamp2 = ee.Date(image2.get('system:time_start')).getInfo()
                timestamp2 = time.gmtime(int(timestamp2['value'])/1000)
                timestamp2 = time.strftime('%c', timestamp2)
                systemid2 = image2.get('system:id').getInfo()
                cloudcover2 = image2.get('CLOUD_COVER').getInfo()
            elif how=='box':
                # overlaps box
                rect = ee.Geometry.Rectangle(minLon,minLat,maxLon,maxLat)
                centerlon = (minLon + maxLon)/2.0
                centerlat = (minLat + maxLat)/2.0
                ulPoint = ee.Geometry.Point([minLon,maxLat])
                lrPoint = ee.Geometry.Point([maxLon,minLat])
                # both box corners must fall inside the scene footprint
                collection = ee.ImageCollection('LT5_L1T') \
                .filterBounds(ulPoint) \
                .filterBounds(lrPoint) \
                .filterDate(start1, finish1) \
                .sort('CLOUD_COVER', True)
                count = collection.toList(100).length().getInfo()
                if count==0:
                    raise ValueError('No images found for first time interval')
                image1 = ee.Image(collection.first()).clip(rect).select('B1','B2','B3','B4','B5','B7')
                timestamp1 = ee.Date(image1.get('system:time_start')).getInfo()
                timestamp1 = time.gmtime(int(timestamp1['value'])/1000)
                timestamp1 = time.strftime('%c', timestamp1)
                systemid1 = image1.get('system:id').getInfo()
                cloudcover1 = image1.get('CLOUD_COVER').getInfo()
                collection = ee.ImageCollection('LT5_L1T') \
                .filterBounds(ulPoint) \
                .filterBounds(lrPoint) \
                .filterDate(start2, finish2) \
                .sort('CLOUD_COVER', True)
                count = collection.toList(100).length().getInfo()
                if count==0:
                    raise ValueError('No images found for second time interval')
                image2 = ee.Image(collection.first()).clip(rect).select('B1','B2','B3','B4','B5','B7')
                timestamp2 = ee.Date(image2.get('system:time_start')).getInfo()
                timestamp2 = time.gmtime(int(timestamp2['value'])/1000)
                timestamp2 = time.strftime('%c', timestamp2)
                systemid2 = image2.get('system:id').getInfo()
                cloudcover2 = image2.get('CLOUD_COVER').getInfo()
            # iMAD:
            B1 = image1.bandNames().get(0)
            input_dict = ee.Dictionary({'image1':image1,'image2':image2})
            # initial state: all-ones weight image, zero MAD image
            first = ee.Dictionary({'weights':image1.select(ee.String(B1)).multiply(0).add(ee.Image.constant(1)),
                                   'MAD':ee.Image.constant(0)})
            # iteration not yet possible, but this is how it goes:
            # result = ee.List.repeat(input_dict, nMax).iterate(imad,first)
            # fake iteration:
            # NOTE(review): if niter < 1 the loop never runs and 'result'
            # below is unbound (NameError caught by the outer except).
            itr = 0
            while itr < niter:
                result = imad(input_dict,first)
                weights = result.get('weights')
                first = ee.Dictionary({'weights':weights,'MAD':ee.Image.constant(0)})
                itr += 1
            # ---------------
            MAD = ee.Image(result.get('MAD'))
            bNames = MAD.bandNames()
            nBands = len(bNames.getInfo())
            # display only the last (highest-order) MAD variate
            lastMAD = ee.String(MAD.bandNames().get(nBands-1))
            scale = image1.select(ee.String(B1)).projection().nominalScale().getInfo()
            downloadpath = MAD.getDownloadUrl({'scale':scale, 'crs':'EPSG:4326'})
            mapid = MAD.select(lastMAD).getMapId({'min': -20, 'max': 20, 'opacity': 0.7})
            if export == 'export':
                # export to Google Drive --------------------------
                gdexport = ee.batch.Export.image(MAD,exportname,
                                                 {'scale':scale,'driveFolder':'EarthEngineImages'})
                gdexportid = str(gdexport.id)
                print '****Exporting to Google Drive, task id: %s '%gdexportid
                gdexport.start()
            else:
                gdexportid = 'none'
            # --------------------------------------------------
            return render_template('madout.html',
                                   mapid = mapid['mapid'],
                                   token = mapid['token'],
                                   centerlon = centerlon,
                                   centerlat = centerlat,
                                   downloadpath = downloadpath,
                                   systemid1 = systemid1,
                                   systemid2 = systemid2,
                                   cloudcover1 = cloudcover1,
                                   cloudcover2 = cloudcover2,
                                   timestamp1 = timestamp1,
                                   timestamp2 = timestamp2)
        except Exception as e:
            return '<br />An error occurred in MAD: %s'%e
@app.route('/wishart.html', methods = ['GET', 'POST'])
def Wishart():
    """Bitemporal Wishart change detection on Sentinel-1 (S1_GRD) imagery.

    GET renders the query form. POST selects the earliest IW-mode, 10 m
    scene overlapping the bounding box in each of two date intervals
    (matching polarization, orbit pass and, optionally, relative orbit
    number), runs omnibus() on the pair and renders the change map.
    Failures are returned as an HTML error string via the broad try/except.
    """
    if request.method == 'GET':
        username = getpass.getuser()
        return render_template('wishart.html', navbar = 'Hi there %s!'%username)
    else:
        try:
            # ---- form parameters ------------------------------------
            start1 = ee.Date(request.form['startdate1'])
            finish1 = ee.Date(request.form['enddate1'])
            start2 = ee.Date(request.form['startdate2'])
            finish2 = ee.Date(request.form['enddate2'])
            minLat = float(request.form['minLat'])
            minLon = float(request.form['minLon'])
            maxLat = float(request.form['maxLat'])
            maxLon = float(request.form['maxLon'])
            orbit = request.form['orbit']
            polarization1 = request.form['polarization']
            relativeorbitnumber = request.form['relativeorbitnumber']
            significance = float(request.form['significance'])
            # dual-pol filter value must be a list for ee.Filter.eq below
            if polarization1 == 'VV,VH':
                polarization = ['VV','VH']
            else:
                polarization = polarization1
            exportname = request.form['exportname']
            # checkboxes are present in the form only when ticked
            if request.form.has_key('export'):
                export = request.form['export']
            else:
                export = ' '
            if request.form.has_key('median'):
                median = True
            else:
                median = False
            rect = ee.Geometry.Rectangle(minLon,minLat,maxLon,maxLat)
            centerlon = (minLon + maxLon)/2.0
            centerlat = (minLat + maxLat)/2.0
            ulPoint = ee.Geometry.Point([minLon,maxLat])
            lrPoint = ee.Geometry.Point([maxLon,minLat])
            # get the first time point image
            collection = ee.ImageCollection('COPERNICUS/S1_GRD') \
                        .filterBounds(ulPoint) \
                        .filterBounds(lrPoint) \
                        .filterDate(start1, finish1) \
                        .filter(ee.Filter.eq('transmitterReceiverPolarisation', polarization)) \
                        .filter(ee.Filter.eq('resolution_meters', 10)) \
                        .filter(ee.Filter.eq('instrumentMode', 'IW')) \
                        .filter(ee.Filter.eq('orbitProperties_pass', orbit))
            if relativeorbitnumber != 'ANY':
                collection = collection.filter(ee.Filter.eq('relativeOrbitNumber_start', int(relativeorbitnumber)))
            count = collection.toList(100).length().getInfo()
            if count==0:
                raise ValueError('No images found for first time interval')
            # earliest matching acquisition, clipped to the box
            collection = collection.sort('system:time_start')
            image1 = ee.Image(collection.first()).clip(rect)
            timestamp1 = ee.Date(image1.get('system:time_start')).getInfo()
            timestamp1= time.gmtime(int(timestamp1['value'])/1000)
            timestamp1 = time.strftime('%c', timestamp1)
            systemid1 = image1.get('system:id').getInfo()
            relativeOrbitNumber1 = int(image1.get('relativeOrbitNumber_start').getInfo())
            # get the second time point image
            collection = ee.ImageCollection('COPERNICUS/S1_GRD') \
                        .filterBounds(ulPoint) \
                        .filterBounds(lrPoint) \
                        .filterDate(start2, finish2) \
                        .filter(ee.Filter.eq('transmitterReceiverPolarisation', polarization)) \
                        .filter(ee.Filter.eq('resolution_meters', 10)) \
                        .filter(ee.Filter.eq('instrumentMode', 'IW')) \
                        .filter(ee.Filter.eq('orbitProperties_pass', orbit))
            if relativeorbitnumber != 'ANY':
                collection = collection.filter(ee.Filter.eq('relativeOrbitNumber_start', int(relativeorbitnumber)))
            count = collection.toList(100).length().getInfo()
            if count==0:
                raise ValueError('No images found for second time interval')
            collection = collection.sort('system:time_start')
            image2 = ee.Image(collection.first()).clip(rect)
            timestamp2 = ee.Date(image2.get('system:time_start')).getInfo()
            timestamp2= time.gmtime(int(timestamp2['value'])/1000)
            timestamp2 = time.strftime('%c', timestamp2)
            systemid2 = image2.get('system:id').getInfo()
            relativeOrbitNumber2 = int(image2.get('relativeOrbitNumber_start').getInfo())
            # Wishart change detection
            # select band(s) via the helpers matching the chosen polarization
            if polarization1=='VV,VH':
                image1 = get_vvvh(image1)
                image2 = get_vvvh(image2)
            elif polarization1=='VV':
                image1 = get_vv(image1)
                image2 = get_vv(image2)
            else:
                image1 = get_vh(image1)
                image2 = get_vh(image2)
            result = ee.Dictionary(omnibus(ee.List([image1,image2]),significance,median))
            # cmap: the change map returned by omnibus()
            cmap = ee.Image(result.get('cmap'))
            mapid = cmap.getMapId({'min':0, 'max':1 ,'palette':'black,red', 'opacity':0.4})
            downloadpath = cmap.getDownloadUrl({'scale':10})
            if export == 'export':
                # export to Assets ---------------------------------
                assexport = ee.batch.Export.image.toAsset(cmap,description="wishartTask", assetId=exportname,scale=10,maxPixels=1e9)
                assexportid = str(assexport.id)
                print '****Exporting to Assets, task id: %s '%assexportid
                assexport.start()
            else:
                assexportid = 'none'
            # --------------------------------------------------
            return render_template('wishartout.html',
                                   mapid = mapid['mapid'],
                                   token = mapid['token'],
                                   centerlon = centerlon,
                                   centerlat = centerlat,
                                   downloadpath = downloadpath,
                                   systemid1 = systemid1,
                                   systemid2 = systemid2,
                                   timestamp1 = timestamp1,
                                   timestamp2 = timestamp2,
                                   relativeOrbitNumber1 = relativeOrbitNumber1,
                                   relativeOrbitNumber2 = relativeOrbitNumber2,
                                   significance = significance,
                                   polarization = polarization1,
                                   assexportid = assexportid)
        except Exception as e:
            return '<br />An error occurred in wishart: %s'%e
@app.route('/omnibus.html', methods = ['GET', 'POST'])
def Omnibus():
if request.method == 'GET':
username = getpass.getuser()
return render_template('omnibus.html', navbar = 'Hi there %s!'%username,
centerlon = 8.5,
centerlat = 50.05)
else:
try:
startdate = request.form['startdate']
enddate = request.form['enddate']
orbit = request.form['orbit']
polarization1 = request.form['polarization']
relativeorbitnumber = request.form['relativeorbitnumber']
if polarization1 == 'VV,VH':
polarization = ['VV','VH']
else:
polarization = polarization1
significance = float(request.form['significance'])
mode = request.form['mode']
minLat = float(request.form['minLat'])
minLon = float(request.form['minLon'])
maxLat = float(request.form['maxLat'])
maxLon = float(request.form['maxLon'])
if request.form.has_key('assexport'):
assexport = request.form['assexport']
else:
assexport = 'none'
if request.form.has_key('gdexport'):
gdexport = request.form['gdexport']
else:
gdexport = 'none'
if request.form.has_key('median'):
median = True
else:
median = False
assexportname = request.form['assexportname']
gdexportname = request.form['gdexportname']
start = ee.Date(startdate)
finish = ee.Date(enddate)
rect = ee.Geometry.Rectangle(minLon,minLat,maxLon,maxLat)
centerlon = (minLon + maxLon)/2.0
centerlat = (minLat + maxLat)/2.0
ulPoint = ee.Geometry.Point([minLon,maxLat])
lrPoint = ee.Geometry.Point([maxLon,minLat])
collection = ee.ImageCollection('COPERNICUS/S1_GRD') \
.filterBounds(ulPoint) \
.filterBounds(lrPoint) \
.filterDate(start, finish) \
.filter(ee.Filter.eq('transmitterReceiverPolarisation', polarization)) \
.filter(ee.Filter.eq('resolution_meters', 10)) \
.filter(ee.Filter.eq('instrumentMode', mode)) \
.filter(ee.Filter.eq('orbitProperties_pass', orbit))
if relativeorbitnumber != 'ANY':
collection = collection.filter(ee.Filter.eq('relativeOrbitNumber_start', int(relativeorbitnumber)))
collection = collection.sort('system:time_start')
system_ids = ee.List(collection.aggregate_array('system:id'))
systemidlist = []
for systemid in system_ids.getInfo():
systemidlist.append(systemid)
systemids = str(systemidlist)
acquisition_times = ee.List(collection.aggregate_array('system:time_start'))
count = acquisition_times.length().getInfo()
if count==0:
raise ValueError('No images found')
timestamplist = []
for timestamp in acquisition_times.getInfo():
tmp = time.gmtime(int(timestamp)/1000)
timestamplist.append(time.strftime('%c', tmp))
timestamp = timestamplist[0]
timestamps = str(timestamplist)
relative_orbit_numbers = ee.List(collection.aggregate_array('relativeOrbitNumber_start'))
relativeorbitnumberlist = []
for ron in relative_orbit_numbers.getInfo():
relativeorbitnumberlist.append(ron)
relativeorbitnumbers = str(relativeorbitnumberlist)
image = ee.Image(collection.first())
systemid = image.get('system:id').getInfo()
if (polarization1 == 'VV') or (polarization1 == 'VV,VH'):
projection = image.select('VV').projection().getInfo()['crs']
else:
projection = image.select('VH').projection().getInfo()['crs']
# make into collection of VV, VH or VVVH images and restore linear scale
if polarization == 'VV':
pcollection = collection.map(get_vv)
elif polarization == 'VH':
pcollection = collection.map(get_vh)
else:
pcollection = collection.map(get_vvvh)
# get the list of images and clip to roi
pList = pcollection.toList(count)
first = ee.Dictionary({'imlist':ee.List([]),'rect':rect})
imList = ee.Dictionary(pList.iterate(clipList,first)).get('imlist')
# run the algorithm
result = ee.Dictionary(omnibus(imList,significance,median))
cmap = ee.Image(result.get('cmap'))
smap = ee.Image(result.get('smap'))
fmap = ee.Image(result.get('fmap'))
cmaps = ee.Image.cat(cmap,smap,fmap).rename(['cmap','smap','fmap'])
if assexport == 'assexport':
# export to Assets ---------------------------------
assexport = ee.batch.Export.image.toAsset(cmaps,
description='assetExportTask',
assetId=assexportname,scale=10,maxPixels=1e9)
assexportid = str(assexport.id)
print '****Exporting to Assets, task id: %s '%assexportid
assexport.start()
else:
assexportid = 'none'
if gdexport == 'gdexport':
# export to Drive ----------------------------------
gdexport = ee.batch.Export.image.toDrive(cmaps,
description='driveExportTask',
folder = 'EarthEngineImages',
fileNamePrefix=gdexportname,scale=10,maxPixels=1e9)
gdexportid = str(gdexport.id)
print '****Exporting to Google Drive, task id: %s '%gdexportid
gdexport.start()
else:
gdexportid = 'none'
# --------------------------------------------------
cmapid = cmap.getMapId({'min': 0, 'max':count-1,'palette':'black,blue,yellow,red', 'opacity': 0.5})
fmapid = fmap.getMapId({'min': 0, 'max':count/2,'palette':'black,blue,yellow,red', 'opacity': 0.5})
smapid = smap.getMapId({'min': 0, 'max':count-1,'palette':'black,blue,yellow,red', 'opacity': 0.5})
return render_template('omnibusout.html',
mapid = fmapid['mapid'],
token = fmapid['token'],
centerlon = centerlon,
centerlat = centerlat,
downloadtext = 'Download change maps',
projection = projection,
systemid = systemid,
count = count,
timestamp = timestamp,
assexportid = 'none',
gdexportid = 'none',
timestamps = timestamps,
systemids = systemids,
polarization = polarization1,
relativeorbitnumbers = relativeorbitnumbers)
except Exception as e:
return '<br />An error occurred in omnibus: %s'%e
if __name__ == '__main__':
    # import ee
    # image = ee.apifunction.ApiFunction.call_("S1.db",ee.Image('TEST/simonf/S1/99/S1B_IW_GRDH_1SDV_20161001T171508_20161001T171533_002316_003E9D_D195'))
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # interactive debugger to the whole network -- disable debug mode for
    # any non-local deployment.
    app.run(debug=True, host='0.0.0.0')
|
|
# Copyright 2012-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
import os
from openstackclient import shell
from tests import utils
# Canned credential/endpoint values used to build fake command lines and to
# verify the option values parsed by the shell under test.
DEFAULT_USERNAME = "username"
DEFAULT_PASSWORD = "password"
DEFAULT_TENANT_ID = "xxxx-yyyy-zzzz"
DEFAULT_TENANT_NAME = "tenant"
DEFAULT_TOKEN = "token"
DEFAULT_REGION_NAME = "ZZ9_Plural_Z_Alpha"
DEFAULT_AUTH_URL = "http://127.0.0.1:5000/v2.0/"
DEFAULT_SERVICE_URL = "http://127.0.0.1:8771/v3.0/"
# API versions expected when the OS_*_API_VERSION environment variables are
# set (see TestShellCli.test_default_env) ...
DEFAULT_COMPUTE_API_VERSION = "2"
DEFAULT_IDENTITY_API_VERSION = "2.0"
DEFAULT_IMAGE_API_VERSION = "v2"
DEFAULT_VOLUME_API_VERSION = "1"
# ... and the built-in fallbacks expected when the environment is empty
# (see TestShellCli.test_empty_env).
LIB_COMPUTE_API_VERSION = "2"
LIB_IDENTITY_API_VERSION = "2.0"
LIB_IMAGE_API_VERSION = "2"
LIB_VOLUME_API_VERSION = "1"
def make_shell():
    """Build an OpenStackShell whose command manager is a Mock.

    Keeps option parsing real while preventing any actual command
    dispatch during the tests.
    """
    new_shell = shell.OpenStackShell()
    new_shell.command_manager = mock.Mock()
    return new_shell
def fake_execute(shell, cmd):
    """Pretend to execute a command line by splitting it on whitespace
    and handing the argv list straight to the shell's run() method."""
    argv = cmd.split()
    return shell.run(argv)
class TestShell(utils.TestCase):
    """Base class: patches command dispatch and provides assertion
    helpers for the three option families (password auth, token auth,
    CLI API versions)."""

    def setUp(self):
        super(TestShell, self).setUp()
        self.cmd_patch = mock.patch(
            "openstackclient.shell.OpenStackShell.run_subcommand")
        self.cmd_save = self.cmd_patch.start()
        self.app = mock.Mock("Test Shell")

    def tearDown(self):
        super(TestShell, self).tearDown()
        self.cmd_patch.stop()

    def _assert_password_auth(self, cmd_options, default_args):
        # Run "<cmd_options> list tenant" and compare each parsed
        # password-auth option against the expected dict.
        with mock.patch("openstackclient.shell.OpenStackShell.initialize_app",
                        self.app):
            _shell = make_shell()
            fake_execute(_shell, cmd_options + " list tenant")
            self.app.assert_called_with(["list", "tenant"])
            pairs = (("os_auth_url", "auth_url"),
                     ("os_tenant_id", "tenant_id"),
                     ("os_tenant_name", "tenant_name"),
                     ("os_username", "username"),
                     ("os_password", "password"),
                     ("os_region_name", "region_name"))
            for attr, key in pairs:
                self.assertEqual(getattr(_shell.options, attr),
                                 default_args[key])

    def _assert_token_auth(self, cmd_options, default_args):
        # Run "<cmd_options> list role" and compare the token-auth options.
        with mock.patch("openstackclient.shell.OpenStackShell.initialize_app",
                        self.app):
            _shell = make_shell()
            fake_execute(_shell, cmd_options + " list role")
            self.app.assert_called_with(["list", "role"])
            for attr in ("os_token", "os_url"):
                self.assertEqual(getattr(_shell.options, attr),
                                 default_args[attr])

    def _assert_cli(self, cmd_options, default_args):
        # Run "<cmd_options> list server" and compare the per-service
        # API version options.
        with mock.patch("openstackclient.shell.OpenStackShell.initialize_app",
                        self.app):
            _shell = make_shell()
            fake_execute(_shell, cmd_options + " list server")
            self.app.assert_called_with(["list", "server"])
            pairs = (("os_compute_api_version", "compute_api_version"),
                     ("os_identity_api_version", "identity_api_version"),
                     ("os_image_api_version", "image_api_version"),
                     ("os_volume_api_version", "volume_api_version"))
            for attr, key in pairs:
                self.assertEqual(getattr(_shell.options, attr),
                                 default_args[key])
class TestShellHelp(TestShell):
    """Test the deferred help flag"""

    def setUp(self):
        super(TestShellHelp, self).setUp()
        # run with a clean environment; restored in tearDown
        self.orig_env, os.environ = os.environ, {}

    def tearDown(self):
        super(TestShellHelp, self).tearDown()
        os.environ = self.orig_env

    def test_help_options(self):
        with mock.patch("openstackclient.shell.OpenStackShell.initialize_app",
                        self.app):
            _shell = make_shell()
            fake_execute(_shell, "-h list server")
            # "-h" must be recorded for deferred handling
            self.assertEqual(_shell.options.deferred_help, True)
class TestShellPasswordAuth(TestShell):
    """Each test supplies exactly one credential flag and expects every
    other parsed password-auth option to stay empty."""

    def setUp(self):
        super(TestShellPasswordAuth, self).setUp()
        # run with a clean environment; restored in tearDown
        self.orig_env, os.environ = os.environ, {}

    def tearDown(self):
        super(TestShellPasswordAuth, self).tearDown()
        os.environ = self.orig_env

    def _expected(self, **overrides):
        # All options default to "" unless a test overrides one.
        expected = {
            "auth_url": "",
            "tenant_id": "",
            "tenant_name": "",
            "username": "",
            "password": "",
            "region_name": "",
        }
        expected.update(overrides)
        return expected

    def test_only_url_flow(self):
        self._assert_password_auth(
            "--os-auth-url " + DEFAULT_AUTH_URL,
            self._expected(auth_url=DEFAULT_AUTH_URL))

    def test_only_tenant_id_flow(self):
        self._assert_password_auth(
            "--os-tenant-id " + DEFAULT_TENANT_ID,
            self._expected(tenant_id=DEFAULT_TENANT_ID))

    def test_only_tenant_name_flow(self):
        self._assert_password_auth(
            "--os-tenant-name " + DEFAULT_TENANT_NAME,
            self._expected(tenant_name=DEFAULT_TENANT_NAME))

    def test_only_username_flow(self):
        self._assert_password_auth(
            "--os-username " + DEFAULT_USERNAME,
            self._expected(username=DEFAULT_USERNAME))

    def test_only_password_flow(self):
        self._assert_password_auth(
            "--os-password " + DEFAULT_PASSWORD,
            self._expected(password=DEFAULT_PASSWORD))

    def test_only_region_name_flow(self):
        self._assert_password_auth(
            "--os-region-name " + DEFAULT_REGION_NAME,
            self._expected(region_name=DEFAULT_REGION_NAME))
class TestShellTokenAuth(TestShell):
    """Token/endpoint auth options sourced from the environment."""

    def setUp(self):
        super(TestShellTokenAuth, self).setUp()
        # seed OS_TOKEN/OS_URL; original environment restored in tearDown
        fake_env = {
            "OS_TOKEN": DEFAULT_TOKEN,
            "OS_URL": DEFAULT_SERVICE_URL,
        }
        self.orig_env, os.environ = os.environ, fake_env.copy()

    def tearDown(self):
        super(TestShellTokenAuth, self).tearDown()
        os.environ = self.orig_env

    def test_default_auth(self):
        # environment values flow through to the parsed options
        self._assert_token_auth("", {"os_token": DEFAULT_TOKEN,
                                     "os_url": DEFAULT_SERVICE_URL})

    def test_empty_auth(self):
        # with no environment both options fall back to ""
        os.environ = {}
        self._assert_token_auth("", {"os_token": "", "os_url": ""})
class TestShellCli(TestShell):
    """Per-service API version options sourced from the environment."""

    def setUp(self):
        super(TestShellCli, self).setUp()
        # seed the OS_*_API_VERSION variables; restored in tearDown
        fake_env = {
            "OS_COMPUTE_API_VERSION": DEFAULT_COMPUTE_API_VERSION,
            "OS_IDENTITY_API_VERSION": DEFAULT_IDENTITY_API_VERSION,
            "OS_IMAGE_API_VERSION": DEFAULT_IMAGE_API_VERSION,
            "OS_VOLUME_API_VERSION": DEFAULT_VOLUME_API_VERSION,
        }
        self.orig_env, os.environ = os.environ, fake_env.copy()

    def tearDown(self):
        super(TestShellCli, self).tearDown()
        os.environ = self.orig_env

    def test_shell_args(self):
        _shell = make_shell()
        with mock.patch("openstackclient.shell.OpenStackShell.initialize_app",
                        self.app):
            fake_execute(_shell, "list user")
            self.app.assert_called_with(["list", "user"])

    def test_default_env(self):
        # environment-provided versions win
        self._assert_cli("", {
            "compute_api_version": DEFAULT_COMPUTE_API_VERSION,
            "identity_api_version": DEFAULT_IDENTITY_API_VERSION,
            "image_api_version": DEFAULT_IMAGE_API_VERSION,
            "volume_api_version": DEFAULT_VOLUME_API_VERSION,
        })

    def test_empty_env(self):
        # without the environment the library defaults apply
        os.environ = {}
        self._assert_cli("", {
            "compute_api_version": LIB_COMPUTE_API_VERSION,
            "identity_api_version": LIB_IDENTITY_API_VERSION,
            "image_api_version": LIB_IMAGE_API_VERSION,
            "volume_api_version": LIB_VOLUME_API_VERSION,
        })
|
|
#!/usr/bin/python
import participantCollection
import re
import datetime
import pyperclip
# Calendar-derived values used when rendering the challenge update posts.
currentMonthIndex = datetime.date.today().month
#TODO: need to figure out how to get total days in current month...
# NOTE(review): calendar.monthrange(year, month)[1] would give the real
# month length; 30 is a hard-coded stand-in.
currentMonthTotalDays = 30
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}[currentMonthIndex]
# month after the current one, wrapping December -> January
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}[nextMonthIndex]
currentDayOfMonthIndex = datetime.date.today().day
# TODO: testing...
# NOTE(review): the next line overrides today's real day with a fixed 30 --
# looks like leftover test scaffolding; confirm before running for real.
currentDayOfMonthIndex = 30
# TODO: more...
currentDayOfMonthName = {1:'first', 2:'second', 3:'third', 4:'fourth', 5:'fifth', 6:'sixth', 7:'seventh', 8:'eighth', 9:'ninth', 10:'tenth', 11:'eleventh', 12:'twelfth', 13:'thirteenth', 14:'fourteenth', 15:'fifteenth', 16:'sixteenth', 17:'seventeenth', 18:'eighteenth', 19:'nineteenth', 20:'twentieth', 21:'twenty-first', 22:'twenty-second', 23:'twenty-third', 24:'twenty-fourth', 25:'twenty-fifth', 26:'twenty-sixth', 27:'twenty-seventh', 28:'twenty-eighth', 29:'twenty-ninth', 30:'thirtieth', 31:'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday', 4:'Friday', 5:'Saturday', 6:'Sunday'}[datetime.date.today().weekday()]
# NOTE: rebinds the name 'participantCollection' from the imported module to
# an instance of its ParticipantCollection class, shadowing the module.
participantCollection = participantCollection.ParticipantCollection()
numberStillIn = participantCollection.sizeOfParticipantsWhoAreStillIn()
initialNumber = participantCollection.size()
# NOTE(review): under Python 2, 100*numberStillIn/initialNumber is integer
# (floor) division, so round() has no effect -- presumably a truncated
# percentage is acceptable; use 100.0 for true rounding.
percentStillIn = int(round(100*numberStillIn/initialNumber,0))
# print "There are currently **" + str(numberStillIn) + " out of " + str(initialNumber) +"** original participants. That's **" + str(int(round(100*numberStillIn/initialNumber,0))) + "%** Here is the list of participants still with the challenge:\n"
def stringToPrintLegacy():
    """Build the legacy summary: a headline with the remaining/initial
    counts and percentage, followed by one "/u/<name>" entry per remaining
    participant (" ~" flags those who have never checked in)."""
    headline = "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    # plain-text placeholders, so str.replace is sufficient
    headline = headline.replace('NUMBER_STILL_IN', str(numberStillIn))
    headline = headline.replace('INITIAL_NUMBER', str(initialNumber))
    headline = headline.replace('PERCENT_STILL_IN', str(percentStillIn))
    pieces = [headline]
    for member in participantCollection.participantsWhoAreStillIn():
        entry = "/u/" + member.name
        if not member.hasCheckedIn:
            entry += " ~"
        pieces.append(entry + "\n\n")
    return "".join(pieces)
def templateForParticipants():
    """Render one "/u/name" paragraph per remaining participant.

    A trailing " ~" marks participants who have never checked in.
    """
    entries = []
    for participant in participantCollection.participantsWhoAreStillIn():
        entry = "/u/" + participant.name
        if not participant.hasCheckedIn:
            entry += " ~"
        entries.append(entry + "\n\n")
    return "".join(entries)
def templateForParticipantsOnFinalDay():
    """Render the final-day roster: checked-in names first, then at-risk ones."""
    parts = []
    parts.append("These participants have checked in at least once in the last 15 days:\n")
    parts.append("\n")
    for participant in participantCollection.participantsWhoAreStillInAndHaveCheckedIn():
        parts.append("/u/" + participant.name + "\n")
    parts.append("\n")
    parts.append("These participants have not reported a relapse, so they are still in the running, but **if they do not check in by the end of today, they will be removed from the list, and will not be considered victorious**:\n")
    parts.append("\n")
    for participant in participantCollection.participantsWhoAreStillInAndHaveNotCheckedIn():
        parts.append("/u/" + participant.name + " ~\n")
    parts.append("\n")
    return "".join(parts)
def templateFor1():
print '1\n\n'
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. We will no longer be accepting new signups. Best of luck to everyone here!\n"
answer += "\n"
answer += "Here's how this thing works:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "Here are our **INITIAL_NUMBER** original participants:\n\n"
answer += templateForParticipants()
print "============================================================="
return answer
def templateFor2to9():
    """Template for days 2-9: standard daily-update post with guidelines.

    Placeholders (CURRENT_*, NEXT_*, NUMBER_STILL_IN, ...) are substituted
    later by stringToPrint().
    """
    print '2 to 9\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor10to14():
    """Template for days 10-14: daily post plus a check-in countdown warning.

    The countdown (15 - today's day) is baked into the string here, not via
    a placeholder.
    """
    print '10 to 14\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
    answer += "\n"
    answer += "**THE COUNTDOWN: Attention everyone!** You have " + str(15-currentDayOfMonthIndex) + " days to make an update comment (if you haven't already) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_INDEX/15!!\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor15():
    """Template for day 15: last-chance warning before the no-check-in purge."""
    print '15\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
    answer += "\n"
    answer += "**THIS IS YOUR LAST DAY TO CHECK IN** (if you haven't already) **BEFORE YOUR NAME IS REMOVED FROM THE LIST!** Check in by posting a brief comment.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor16toPenultimate():
    """Template for day 16 through the next-to-last day of the month."""
    print '16 to penultimate\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
    answer += "\n"
    answer += "If you think you should still be on this list but aren't, you probably got removed in the great purge of CURRENT_MONTH_NAME 15th because you never checked in. However, if you let me know you're still with it I might re-add you.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads since CURRENT_MONTH_NAME 15. If it is still there by CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateForUltimate():
    """Template for the last day of the month: congratulatory wrap-up post.

    Uses templateForParticipantsOnFinalDay() so checked-in and not-checked-in
    participants are listed separately.
    """
    print 'Ultimate\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the last day of the Stay Clean: CURRENT_MONTH_NAME challenge. This is it, folks, the day we've been waiting for... the final day of the challenge. I'll be making a congratulatory post tomorrow to honor the victors. I'm really proud of everyone who signed up for this challenge. Quitting porn is difficult, especially in an era where porn is always as close as a few keystrokes, and triggers are absolutely everywhere. Everybody who gave it their best shot deserves to take a minute right now to feel good about themselves.\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    #TODO: need to do the part where it lists the checked in and non-checked in participants separately.
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**.\n\n"
    answer += templateForParticipantsOnFinalDay()
    return answer
def templateToUse():
    """Select the template function appropriate for today's day of month.

    Fix: the day-15 branch used a bare `if`, silently splitting the chain in
    two; it only worked because every earlier branch returns. Restored a
    single `elif` chain and dropped the commented-out dead code (one stale
    comment even had a wrong `<= 14` bound).
    """
    if currentDayOfMonthIndex == 1:
        return templateFor1()
    elif 2 <= currentDayOfMonthIndex <= 9:
        return templateFor2to9()
    elif 10 <= currentDayOfMonthIndex <= 14:
        return templateFor10to14()
    elif currentDayOfMonthIndex == 15:
        return templateFor15()
    elif 16 <= currentDayOfMonthIndex <= currentMonthPenultimateDayIndex:
        return templateFor16toPenultimate()
    else:
        # Last day of the month.
        return templateForUltimate()
def stringToPrint():
    """Render today's template and fill in every placeholder."""
    # Substitutions are applied in the same order as before; each pattern is
    # a plain uppercase token, so earlier replacements never corrupt later
    # placeholders.
    substitutions = (
        ('NUMBER_STILL_IN', str(numberStillIn)),
        ('INITIAL_NUMBER', str(initialNumber)),
        ('PERCENT_STILL_IN', str(percentStillIn)),
        ('CURRENT_MONTH_INDEX', str(currentMonthIndex)),
        ('CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays)),
        ('CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex)),
        ('CURRENT_MONTH_NAME', currentMonthName),
        ('NEXT_MONTH_INDEX', str(nextMonthIndex)),
        ('NEXT_MONTH_NAME', nextMonthName),
        ('CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex)),
        ('CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName),
        ('CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName),
    )
    answer = templateToUse()
    for placeholder, value in substitutions:
        answer = re.sub(placeholder, value, answer)
    return answer
# Build the final update post, echo it to the console between separator
# rules, and copy it to the system clipboard for pasting into reddit.
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
# print re.sub('FOO', 'there', 'hello FOO yall')
# for participant in participantCollection.participantsWhoAreStillIn():
# if participant.hasCheckedIn:
# print "/u/" + participant.name
# else:
# print "/u/" + participant.name + " ~"
# print ""
|
|
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.loadbalancers \
import forms as project_forms
from openstack_dashboard.dashboards.project.loadbalancers \
import tables as project_tables
from openstack_dashboard.dashboards.project.loadbalancers \
import tabs as project_tabs
from openstack_dashboard.dashboards.project.loadbalancers import utils
from openstack_dashboard.dashboards.project.loadbalancers \
import workflows as project_workflows
class IndexView(tabs.TabbedTableView):
    """Landing page: tabbed tables for the load balancer resources."""

    tab_group_class = project_tabs.LoadBalancerTabs
    template_name = 'project/loadbalancers/details_tabs.html'
    page_title = _("Load Balancer")
class AddPoolView(workflows.WorkflowView):
    # Workflow-backed view for creating a new LBaaS pool.
    workflow_class = project_workflows.AddPool
class AddVipView(workflows.WorkflowView):
    """Workflow view that adds a VIP to an existing pool."""

    workflow_class = project_workflows.AddVip

    def get_initial(self):
        # Pre-populate the workflow with the pool id and, when retrievable,
        # the CIDR of the pool's subnet.
        initial = super(AddVipView, self).get_initial()
        pool_id = self.kwargs['pool_id']
        initial['pool_id'] = pool_id
        try:
            pool = api.lbaas.pool_get(self.request, pool_id)
            subnet = api.neutron.subnet_get(self.request, pool.subnet_id)
            initial['subnet'] = subnet.cidr
        except Exception as e:
            initial['subnet'] = ''
            exceptions.handle(self.request,
                              _('Unable to retrieve pool subnet. %s') % e)
        return initial
class AddMemberView(workflows.WorkflowView):
    # Workflow-backed view for adding a member to a pool.
    workflow_class = project_workflows.AddMember
class AddMonitorView(workflows.WorkflowView):
    # Workflow-backed view for creating a health monitor.
    workflow_class = project_workflows.AddMonitor
class PoolDetailsView(tabs.TabView):
    """Detail page for a single pool, including its health monitors."""

    tab_group_class = project_tabs.PoolDetailsTabs
    template_name = 'horizon/common/_detail.html'
    page_title = "{{ pool.name|default:pool.id }}"

    @memoized.memoized_method
    def get_data(self):
        # Fetch the pool once per request; on failure fall back to an empty
        # list (the historical sentinel used by this module).
        pool_id = self.kwargs['pool_id']
        try:
            pool = api.lbaas.pool_get(self.request, pool_id)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve pool details.'))
            return []
        # Annotate each monitor with a human-readable name for the template.
        for monitor in pool.health_monitors:
            setattr(monitor, 'display_name',
                    utils.get_monitor_display_name(monitor))
        return pool

    def get_context_data(self, **kwargs):
        context = super(PoolDetailsView, self).get_context_data(**kwargs)
        pool = self.get_data()
        context['pool'] = pool
        context["url"] = self.get_redirect_url()
        context["actions"] = project_tables.PoolsTable(
            self.request).render_row_actions(pool)
        return context

    def get_tabs(self, request, *args, **kwargs):
        return self.tab_group_class(self.request, pool=self.get_data(),
                                    **kwargs)

    @staticmethod
    def get_redirect_url():
        return reverse_lazy("horizon:project:loadbalancers:index")
class VipDetailsView(tabs.TabView):
    """Detail page for a VIP, with its floating IP when one is attached."""

    tab_group_class = project_tabs.VipDetailsTabs
    template_name = 'horizon/common/_detail.html'
    page_title = "{{ vip.name|default:vip_id }}"

    @memoized.memoized_method
    def get_data(self):
        # On failure the empty-list sentinel is returned, matching the rest
        # of this module.
        vip = []
        try:
            vip = api.lbaas.vip_get(self.request, self.kwargs['vip_id'])
            floating_ips = api.network.tenant_floating_ip_list(self.request)
            matches = [fip for fip in floating_ips
                       if fip.port_id == vip.port.id]
            if matches:
                vip.fip = matches[0]
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve VIP details.'))
        return vip

    def get_context_data(self, **kwargs):
        context = super(VipDetailsView, self).get_context_data(**kwargs)
        vip = self.get_data()
        context['vip'] = vip
        context["custom_breadcrumb"] = [
            (vip.pool.name_or_id,
             reverse('horizon:project:loadbalancers:vipdetails',
                     args=(vip.id,))),
            (_("VIP"), None),
        ]
        return context

    def get_tabs(self, request, *args, **kwargs):
        return self.tab_group_class(request, vip=self.get_data(), **kwargs)

    @staticmethod
    def get_redirect_url():
        return reverse("horizon:project:loadbalancers:index")
class MemberDetailsView(tabs.TabView):
    """Detail page for a single pool member."""

    tab_group_class = project_tabs.MemberDetailsTabs
    template_name = 'horizon/common/_detail.html'
    page_title = "{{ member.name|default:member.id }}"

    @memoized.memoized_method
    def get_data(self):
        # Returns None when the lookup fails (after notifying the user).
        try:
            return api.lbaas.member_get(self.request,
                                        self.kwargs['member_id'])
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve member details.'))

    def get_context_data(self, **kwargs):
        context = super(MemberDetailsView, self).get_context_data(**kwargs)
        member = self.get_data()
        context['member'] = member
        context["custom_breadcrumb"] = [
            (member.pool.name_or_id,
             reverse('horizon:project:loadbalancers:pooldetails',
                     args=(member.pool.id,))),
            (_("Members"), reverse('horizon:project:loadbalancers:members')),
        ]
        context["url"] = self.get_redirect_url()
        context["actions"] = project_tables.MembersTable(
            self.request).render_row_actions(member)
        return context

    def get_tabs(self, request, *args, **kwargs):
        return self.tab_group_class(request, member=self.get_data(), **kwargs)

    @staticmethod
    def get_redirect_url():
        return reverse_lazy("horizon:project:loadbalancers:index")
class MonitorDetailsView(tabs.TabView):
    """Detail page for a pool health monitor."""

    tab_group_class = project_tabs.MonitorDetailsTabs
    template_name = 'horizon/common/_detail.html'
    page_title = "{{ monitor.name|default:monitor.id }}"

    @memoized.memoized_method
    def get_data(self):
        # Returns None when the lookup fails (after notifying the user).
        try:
            return api.lbaas.pool_health_monitor_get(
                self.request, self.kwargs['monitor_id'])
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve monitor details.'))

    def get_context_data(self, **kwargs):
        context = super(MonitorDetailsView, self).get_context_data(**kwargs)
        monitor = self.get_data()
        context['monitor'] = monitor
        context["custom_breadcrumb"] = [
            (_("Monitors"), reverse('horizon:project:loadbalancers:monitors')),
        ]
        context["url"] = self.get_redirect_url()
        context["actions"] = project_tables.MonitorsTable(
            self.request).render_row_actions(monitor)
        return context

    def get_tabs(self, request, *args, **kwargs):
        return self.tab_group_class(request, monitor=self.get_data(),
                                    **kwargs)

    @staticmethod
    def get_redirect_url():
        return reverse_lazy("horizon:project:loadbalancers:index")
class UpdatePoolView(forms.ModalFormView):
    """Modal form for editing an existing pool."""

    form_class = project_forms.UpdatePool
    form_id = "update_pool_form"
    modal_header = _("Edit Pool")
    template_name = "project/loadbalancers/updatepool.html"
    context_object_name = 'pool'
    submit_label = _("Save Changes")
    submit_url = "horizon:project:loadbalancers:updatepool"
    success_url = reverse_lazy("horizon:project:loadbalancers:index")
    page_title = _("Edit Pool")

    def get_context_data(self, **kwargs):
        context = super(UpdatePoolView, self).get_context_data(**kwargs)
        pool_id = self.kwargs['pool_id']
        context["pool_id"] = pool_id
        context['submit_url'] = reverse(self.submit_url, args=(pool_id,))
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        # Fetch once per request; on failure redirect back to the index.
        try:
            return api.lbaas.pool_get(self.request, self.kwargs['pool_id'])
        except Exception as e:
            exceptions.handle(self.request,
                              _('Unable to retrieve pool details. %s') % e,
                              redirect=self.success_url)

    def get_initial(self):
        pool = self._get_object()
        return {'name': pool['name'],
                'pool_id': pool['id'],
                'description': pool['description'],
                'lb_method': pool['lb_method'],
                'admin_state_up': pool['admin_state_up']}
class UpdateVipView(forms.ModalFormView):
    """Modal form for editing an existing VIP."""

    form_class = project_forms.UpdateVip
    form_id = "update_vip_form"
    modal_header = _("Edit VIP")
    template_name = "project/loadbalancers/updatevip.html"
    context_object_name = 'vip'
    submit_label = _("Save Changes")
    submit_url = "horizon:project:loadbalancers:updatevip"
    success_url = reverse_lazy("horizon:project:loadbalancers:index")
    page_title = _("Edit VIP")

    def get_context_data(self, **kwargs):
        context = super(UpdateVipView, self).get_context_data(**kwargs)
        vip_id = self.kwargs['vip_id']
        context["vip_id"] = vip_id
        context['submit_url'] = reverse(self.submit_url, args=(vip_id,))
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        # Fetch once per request; on failure redirect back to the index.
        try:
            return api.lbaas.vip_get(self.request, self.kwargs['vip_id'])
        except Exception as e:
            exceptions.handle(self.request,
                              _('Unable to retrieve VIP details. %s') % e,
                              redirect=self.success_url)

    def get_initial(self):
        vip = self._get_object()
        # Session persistence is optional; the cookie name only applies to
        # APP_COOKIE persistence.
        persistence = getattr(vip, 'session_persistence', None)
        stype = persistence['type'] if persistence else ''
        cookie = persistence['cookie_name'] if stype == 'APP_COOKIE' else ''
        return {'name': vip['name'],
                'vip_id': vip['id'],
                'description': vip['description'],
                'pool_id': vip['pool_id'],
                'session_persistence': stype,
                'cookie_name': cookie,
                'connection_limit': vip['connection_limit'],
                'admin_state_up': vip['admin_state_up']}
class UpdateMemberView(forms.ModalFormView):
    """Modal form for editing an existing pool member."""

    form_class = project_forms.UpdateMember
    form_id = "update_pool_form"
    modal_header = _("Edit Member")
    template_name = "project/loadbalancers/updatemember.html"
    context_object_name = 'member'
    submit_label = _("Save Changes")
    submit_url = "horizon:project:loadbalancers:updatemember"
    success_url = reverse_lazy("horizon:project:loadbalancers:index")
    page_title = _("Edit Member")

    def get_context_data(self, **kwargs):
        context = super(UpdateMemberView, self).get_context_data(**kwargs)
        member_id = self.kwargs['member_id']
        context["member_id"] = member_id
        context['submit_url'] = reverse(self.submit_url, args=(member_id,))
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        # Fetch once per request; on failure redirect back to the index.
        try:
            return api.lbaas.member_get(self.request,
                                        self.kwargs['member_id'])
        except Exception as e:
            exceptions.handle(self.request,
                              _('Unable to retrieve member details. %s') % e,
                              redirect=self.success_url)

    def get_initial(self):
        member = self._get_object()
        return {'member_id': member['id'],
                'pool_id': member['pool_id'],
                'weight': member['weight'],
                'admin_state_up': member['admin_state_up']}
class UpdateMonitorView(forms.ModalFormView):
    """Modal form for editing an existing health monitor."""

    form_class = project_forms.UpdateMonitor
    form_id = "update_monitor_form"
    modal_header = _("Edit Monitor")
    template_name = "project/loadbalancers/updatemonitor.html"
    context_object_name = 'monitor'
    submit_label = _("Save Changes")
    submit_url = "horizon:project:loadbalancers:updatemonitor"
    success_url = reverse_lazy("horizon:project:loadbalancers:index")
    page_title = _("Edit Monitor")

    def get_context_data(self, **kwargs):
        context = super(UpdateMonitorView, self).get_context_data(**kwargs)
        monitor_id = self.kwargs['monitor_id']
        context["monitor_id"] = monitor_id
        context['submit_url'] = reverse(self.submit_url, args=(monitor_id,))
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        # Fetch once per request; on failure redirect back to the index.
        try:
            return api.lbaas.pool_health_monitor_get(
                self.request, self.kwargs['monitor_id'])
        except Exception as e:
            exceptions.handle(
                self.request,
                _('Unable to retrieve health monitor details. %s') % e,
                redirect=self.success_url)

    def get_initial(self):
        monitor = self._get_object()
        return {'monitor_id': monitor['id'],
                'delay': monitor['delay'],
                'timeout': monitor['timeout'],
                'max_retries': monitor['max_retries'],
                'admin_state_up': monitor['admin_state_up']}
class AddPMAssociationView(workflows.WorkflowView):
    """Workflow view that associates a health monitor with a pool."""

    workflow_class = project_workflows.AddPMAssociation

    def get_initial(self):
        initial = super(AddPMAssociationView, self).get_initial()
        pool_id = self.kwargs['pool_id']
        initial['pool_id'] = pool_id
        try:
            pool = api.lbaas.pool_get(self.request, pool_id)
            initial['pool_name'] = pool.name
            initial['pool_monitors'] = pool.health_monitors
        except Exception as e:
            exceptions.handle(self.request,
                              _('Unable to retrieve pool. %s') % e)
        return initial
class DeletePMAssociationView(workflows.WorkflowView):
    """Workflow view that removes a monitor association from a pool."""

    workflow_class = project_workflows.DeletePMAssociation

    def get_initial(self):
        initial = super(DeletePMAssociationView, self).get_initial()
        pool_id = self.kwargs['pool_id']
        initial['pool_id'] = pool_id
        try:
            pool = api.lbaas.pool_get(self.request, pool_id)
            initial['pool_name'] = pool.name
            initial['pool_monitors'] = pool.health_monitors
        except Exception as e:
            exceptions.handle(self.request,
                              _('Unable to retrieve pool. %s') % e)
        return initial
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create threads to run multiple enqueue ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import weakref
from tensorflow.core.protobuf import queue_runner_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# Guidance string appended to the deprecation warnings emitted by this module.
_DEPRECATION_INSTRUCTION = (
    "To construct input pipelines, use the `tf.data` module.")
@tf_export(v1=["train.queue_runner.QueueRunner", "train.QueueRunner"])
class QueueRunner(object):
"""Holds a list of enqueue operations for a queue, each to be run in a thread.
Queues are a convenient TensorFlow mechanism to compute tensors
asynchronously using multiple threads. For example in the canonical 'Input
Reader' setup one set of threads generates filenames in a queue; a second set
of threads read records from the files, processes them, and enqueues tensors
on a second queue; a third set of threads dequeues these input records to
construct batches and runs them through training operations.
There are several delicate issues when running multiple threads that way:
closing the queues in sequence as the input is exhausted, correctly catching
and reporting exceptions, etc.
The `QueueRunner`, combined with the `Coordinator`, helps handle these issues.
@compatibility(TF2)
QueueRunners are not compatible with eager execution. Instead, please
use [tf.data](https://www.tensorflow.org/guide/data) to get data into your
model.
@end_compatibility
"""
@deprecation.deprecated(None, _DEPRECATION_INSTRUCTION)
def __init__(self, queue=None, enqueue_ops=None, close_op=None,
             cancel_op=None, queue_closed_exception_types=None,
             queue_runner_def=None, import_scope=None):
  """Create a QueueRunner.
  On construction the `QueueRunner` adds an op to close the queue. That op
  will be run if the enqueue ops raise exceptions.
  When you later call the `create_threads()` method, the `QueueRunner` will
  create one thread for each op in `enqueue_ops`. Each thread will run its
  enqueue op in parallel with the other threads. The enqueue ops do not have
  to all be the same op, but it is expected that they all enqueue tensors in
  `queue`.
  Args:
    queue: A `Queue`.
    enqueue_ops: List of enqueue ops to run in threads later.
    close_op: Op to close the queue. Pending enqueue ops are preserved.
    cancel_op: Op to close the queue and cancel pending enqueue ops.
    queue_closed_exception_types: Optional tuple of Exception types that
      indicate that the queue has been closed when raised during an enqueue
      operation. Defaults to `(tf.errors.OutOfRangeError,)`. Another common
      case includes `(tf.errors.OutOfRangeError, tf.errors.CancelledError)`,
      when some of the enqueue ops may dequeue from other Queues.
    queue_runner_def: Optional `QueueRunnerDef` protocol buffer. If specified,
      recreates the QueueRunner from its contents. `queue_runner_def` and the
      other arguments are mutually exclusive.
    import_scope: Optional `string`. Name scope to add. Only used when
      initializing from protocol buffer.
  Raises:
    ValueError: If both `queue_runner_def` and `queue` are specified.
    ValueError: If `queue` or `enqueue_ops` are not provided when not
      restoring from `queue_runner_def`.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError(
        "QueueRunners are not supported when eager execution is enabled. "
        "Instead, please use tf.data to get data into your model.")
  if queue_runner_def:
    # Rebuilding from a serialized QueueRunnerDef; direct graph arguments
    # must not be mixed in.
    if queue or enqueue_ops:
      raise ValueError("queue_runner_def and queue are mutually exclusive.")
    self._init_from_proto(queue_runner_def,
                          import_scope=import_scope)
  else:
    self._init_from_args(
        queue=queue, enqueue_ops=enqueue_ops,
        close_op=close_op, cancel_op=cancel_op,
        queue_closed_exception_types=queue_closed_exception_types)
  # Protect the count of runs to wait for.
  self._lock = threading.Lock()
  # A map from a session object to the number of outstanding queue runner
  # threads for that session.
  self._runs_per_session = weakref.WeakKeyDictionary()
  # List of exceptions raised by the running threads.
  self._exceptions_raised = []
  def _init_from_args(self, queue=None, enqueue_ops=None, close_op=None,
                      cancel_op=None, queue_closed_exception_types=None):
    """Create a QueueRunner from arguments.

    Args:
      queue: A `Queue`.
      enqueue_ops: List of enqueue ops to run in threads later.
      close_op: Op to close the queue. Pending enqueue ops are preserved.
      cancel_op: Op to close the queue and cancel pending enqueue ops.
      queue_closed_exception_types: Tuple of exception types, which indicate
        the queue has been safely closed.

    Raises:
      ValueError: If `queue` or `enqueue_ops` are not provided when not
        restoring from `queue_runner_def`.
      TypeError: If `queue_closed_exception_types` is provided, but is not
        a non-empty tuple of error types (subclasses of `tf.errors.OpError`).
    """
    if not queue or not enqueue_ops:
      raise ValueError("Must provide queue and enqueue_ops.")
    self._queue = queue
    self._enqueue_ops = enqueue_ops
    self._close_op = close_op
    self._cancel_op = cancel_op
    if queue_closed_exception_types is not None:
      # Validate eagerly so a bad argument fails at construction time rather
      # than inside a runner thread where it would be harder to diagnose.
      if (not isinstance(queue_closed_exception_types, tuple)
          or not queue_closed_exception_types
          or not all(issubclass(t, errors.OpError)
                     for t in queue_closed_exception_types)):
        raise TypeError(
            "queue_closed_exception_types, when provided, "
            "must be a tuple of tf.error types, but saw: %s"
            % queue_closed_exception_types)
    self._queue_closed_exception_types = queue_closed_exception_types
    # Close when no more will be produced, but pending enqueues should be
    # preserved.
    if self._close_op is None:
      self._close_op = self._queue.close()
    # Close and cancel pending enqueues since there was an error and we want
    # to unblock everything so we can cleanly exit.
    if self._cancel_op is None:
      self._cancel_op = self._queue.close(cancel_pending_enqueues=True)
    if not self._queue_closed_exception_types:
      # Default: only OutOfRangeError signals a clean queue shutdown.
      self._queue_closed_exception_types = (errors.OutOfRangeError,)
    else:
      # Normalize to a tuple so `except` clauses can use it directly.
      self._queue_closed_exception_types = tuple(
          self._queue_closed_exception_types)
def _init_from_proto(self, queue_runner_def, import_scope=None):
"""Create a QueueRunner from `QueueRunnerDef`.
Args:
queue_runner_def: Optional `QueueRunnerDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(queue_runner_def, queue_runner_pb2.QueueRunnerDef)
g = ops.get_default_graph()
self._queue = g.as_graph_element(
ops.prepend_name_scope(queue_runner_def.queue_name, import_scope))
self._enqueue_ops = [g.as_graph_element(
ops.prepend_name_scope(op, import_scope))
for op in queue_runner_def.enqueue_op_name]
self._close_op = g.as_graph_element(ops.prepend_name_scope(
queue_runner_def.close_op_name, import_scope))
self._cancel_op = g.as_graph_element(ops.prepend_name_scope(
queue_runner_def.cancel_op_name, import_scope))
self._queue_closed_exception_types = tuple(
errors.exception_type_from_error_code(code)
for code in queue_runner_def.queue_closed_exception_types)
# Legacy support for old QueueRunnerDefs created before this field
# was added.
if not self._queue_closed_exception_types:
self._queue_closed_exception_types = (errors.OutOfRangeError,)
  @property
  def queue(self):
    """The `Queue` whose enqueue ops this runner executes."""
    return self._queue
  @property
  def enqueue_ops(self):
    """The enqueue `Operation`s; `create_threads` starts one thread per op."""
    return self._enqueue_ops
  @property
  def close_op(self):
    """Op that closes the queue while preserving pending enqueues."""
    return self._close_op
  @property
  def cancel_op(self):
    """Op that closes the queue and cancels pending enqueues."""
    return self._cancel_op
  @property
  def queue_closed_exception_types(self):
    """Tuple of exception types that signal the queue was safely closed."""
    return self._queue_closed_exception_types
  @property
  def exceptions_raised(self):
    """Exceptions raised but not handled by the `QueueRunner` threads.

    Exceptions raised in queue runner threads are handled in one of two ways
    depending on whether or not a `Coordinator` was passed to
    `create_threads()`:

    * With a `Coordinator`, exceptions are reported to the coordinator and
      forgotten by the `QueueRunner`.
    * Without a `Coordinator`, exceptions are captured by the `QueueRunner` and
      made available in this `exceptions_raised` property.

    Returns:
      A list of Python `Exception` objects.  The list is empty if no exception
      was captured.  (No exceptions are captured when using a Coordinator.)
    """
    return self._exceptions_raised
  @property
  def name(self):
    """The string name of the underlying Queue."""
    return self._queue.name
  # pylint: disable=broad-except
  def _run(self, sess, enqueue_op, coord=None):
    """Execute the enqueue op in a loop, close the queue in case of error.

    Runs in its own thread (one per enqueue op, started by `create_threads`).

    Args:
      sess: A Session.
      enqueue_op: The Operation to run.
      coord: Optional Coordinator object for reporting errors and checking
        for stop conditions.
    """
    # Tracks whether this thread's slot in _runs_per_session was already
    # released, so the `finally` block does not decrement it twice.
    decremented = False
    try:
      # Make a cached callable from the `enqueue_op` to decrease the
      # Python overhead in the queue-runner loop.
      enqueue_callable = sess.make_callable(enqueue_op)
      while True:
        if coord and coord.should_stop():
          break
        try:
          enqueue_callable()
        except self._queue_closed_exception_types:  # pylint: disable=catching-non-exception
          # This exception indicates that a queue was closed.
          with self._lock:
            self._runs_per_session[sess] -= 1
            decremented = True
            if self._runs_per_session[sess] == 0:
              # Last runner thread for this session: close the queue so
              # downstream dequeue ops unblock.
              try:
                sess.run(self._close_op)
              except Exception as e:
                # Intentionally ignore errors from close_op.
                logging.vlog(1, "Ignored exception: %s", str(e))
            return
    except Exception as e:
      # This catches all other exceptions.
      if coord:
        coord.request_stop(e)
      else:
        logging.error("Exception in QueueRunner: %s", str(e))
        with self._lock:
          self._exceptions_raised.append(e)
        raise
    finally:
      # Make sure we account for all terminations: normal or errors.
      if not decremented:
        with self._lock:
          self._runs_per_session[sess] -= 1
def _close_on_stop(self, sess, cancel_op, coord):
"""Close the queue when the Coordinator requests stop.
Args:
sess: A Session.
cancel_op: The Operation to run.
coord: Coordinator.
"""
coord.wait_for_stop()
try:
sess.run(cancel_op)
except Exception as e:
# Intentionally ignore errors from cancel_op.
logging.vlog(1, "Ignored exception: %s", str(e))
# pylint: enable=broad-except
def create_threads(self, sess, coord=None, daemon=False, start=False):
"""Create threads to run the enqueue ops for the given session.
This method requires a session in which the graph was launched. It creates
a list of threads, optionally starting them. There is one thread for each
op passed in `enqueue_ops`.
The `coord` argument is an optional coordinator that the threads will use
to terminate together and report exceptions. If a coordinator is given,
this method starts an additional thread to close the queue when the
coordinator requests a stop.
If previously created threads for the given session are still running, no
new threads will be created.
Args:
sess: A `Session`.
coord: Optional `Coordinator` object for reporting errors and checking
stop conditions.
daemon: Boolean. If `True` make the threads daemon threads.
start: Boolean. If `True` starts the threads. If `False` the
caller must call the `start()` method of the returned threads.
Returns:
A list of threads.
"""
with self._lock:
try:
if self._runs_per_session[sess] > 0:
# Already started: no new threads to return.
return []
except KeyError:
# We haven't seen this session yet.
pass
self._runs_per_session[sess] = len(self._enqueue_ops)
self._exceptions_raised = []
ret_threads = []
for op in self._enqueue_ops:
name = "QueueRunnerThread-{}-{}".format(self.name, op.name)
ret_threads.append(threading.Thread(target=self._run,
args=(sess, op, coord),
name=name))
if coord:
name = "QueueRunnerThread-{}-close_on_stop".format(self.name)
ret_threads.append(threading.Thread(target=self._close_on_stop,
args=(sess, self._cancel_op, coord),
name=name))
for t in ret_threads:
if coord:
coord.register_thread(t)
if daemon:
t.daemon = True
if start:
t.start()
return ret_threads
def to_proto(self, export_scope=None):
"""Converts this `QueueRunner` to a `QueueRunnerDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `QueueRunnerDef` protocol buffer, or `None` if the `Variable` is not in
the specified name scope.
"""
if (export_scope is None or
self.queue.name.startswith(export_scope)):
queue_runner_def = queue_runner_pb2.QueueRunnerDef()
queue_runner_def.queue_name = ops.strip_name_scope(
self.queue.name, export_scope)
for enqueue_op in self.enqueue_ops:
queue_runner_def.enqueue_op_name.append(
ops.strip_name_scope(enqueue_op.name, export_scope))
queue_runner_def.close_op_name = ops.strip_name_scope(
self.close_op.name, export_scope)
queue_runner_def.cancel_op_name = ops.strip_name_scope(
self.cancel_op.name, export_scope)
queue_runner_def.queue_closed_exception_types.extend([
errors.error_code_from_exception_type(cls)
for cls in self._queue_closed_exception_types])
return queue_runner_def
else:
return None
  @staticmethod
  def from_proto(queue_runner_def, import_scope=None):
    """Returns a `QueueRunner` object created from `queue_runner_def`.

    Args:
      queue_runner_def: A `QueueRunnerDef` protocol buffer.
      import_scope: Optional `string`. Name scope prepended when resolving
        node names.
    """
    return QueueRunner(queue_runner_def=queue_runner_def,
                       import_scope=import_scope)
@tf_export(v1=["train.queue_runner.add_queue_runner", "train.add_queue_runner"])
@deprecation.deprecated(None, _DEPRECATION_INSTRUCTION)
def add_queue_runner(qr, collection=ops.GraphKeys.QUEUE_RUNNERS):
  """Adds a `QueueRunner` to a collection in the graph.

  When building a complex model that uses many queues it is often difficult to
  gather all the queue runners that need to be run.  This convenience function
  allows you to add a queue runner to a well known collection in the graph.

  The companion method `start_queue_runners()` can be used to start threads for
  all the collected queue runners.

  @compatibility(TF2)
  QueueRunners are not compatible with eager execution. Instead, please
  use [tf.data](https://www.tensorflow.org/guide/data) to get data into your
  model.
  @end_compatibility

  Args:
    qr: A `QueueRunner`.
    collection: A `GraphKey` specifying the graph collection to add
      the queue runner to.  Defaults to `GraphKeys.QUEUE_RUNNERS`.
  """
  # Collections are per-graph, so the runner is registered on the graph that
  # is current at call time.
  ops.add_to_collection(collection, qr)
@tf_export(v1=["train.queue_runner.start_queue_runners",
               "train.start_queue_runners"])
@deprecation.deprecated(None, _DEPRECATION_INSTRUCTION)
def start_queue_runners(sess=None, coord=None, daemon=True, start=True,
                        collection=ops.GraphKeys.QUEUE_RUNNERS):
  """Starts all queue runners collected in the graph.

  This is a companion method to `add_queue_runner()`.  It just starts
  threads for all queue runners collected in the graph.  It returns
  the list of all threads.

  @compatibility(TF2)
  QueueRunners are not compatible with eager execution. Instead, please
  use [tf.data](https://www.tensorflow.org/guide/data) to get data into your
  model.
  @end_compatibility

  Args:
    sess: `Session` used to run the queue ops.  Defaults to the
      default session.
    coord: Optional `Coordinator` for coordinating the started threads.
    daemon: Whether the threads should be marked as `daemons`, meaning
      they don't block program exit.
    start: Set to `False` to only create the threads, not start them.
    collection: A `GraphKey` specifying the graph collection to
      get the queue runners from.  Defaults to `GraphKeys.QUEUE_RUNNERS`.

  Returns:
    A list of threads.

  Raises:
    RuntimeError: If called with eager execution enabled.
    ValueError: If `sess` is None and there isn't any default session.
    TypeError: If `sess` is not a `tf.compat.v1.Session` object.
  """
  if context.executing_eagerly():
    raise RuntimeError("Queues are not compatible with eager execution.")
  if sess is None:
    sess = ops.get_default_session()
    if not sess:
      raise ValueError("Cannot start queue runners: No default session is "
                       "registered. Use `with sess.as_default()` or pass an "
                       "explicit session to tf.start_queue_runners(sess=sess)")

  if not isinstance(sess, session.SessionInterface):
    # Following check is due to backward compatibility. (b/62061352)
    if sess.__class__.__name__ in [
        "MonitoredSession", "SingularMonitoredSession"]:
      return []
    raise TypeError("sess must be a `tf.Session` object. "
                    "Given class: {}".format(sess.__class__))

  queue_runners = ops.get_collection(collection)
  if not queue_runners:
    logging.warning(
        "`tf.train.start_queue_runners()` was called when no queue runners "
        "were defined. You can safely remove the call to this deprecated "
        "function.")

  with sess.graph.as_default():
    threads = []
    # Reuse the collection fetched above instead of querying the graph a
    # second time (the original called ops.get_collection() twice).
    for qr in queue_runners:
      threads.extend(qr.create_threads(sess, coord=coord, daemon=daemon,
                                       start=start))
  return threads
# Register serialization for the QUEUE_RUNNERS collection so QueueRunners
# survive MetaGraphDef export/import round trips.
ops.register_proto_function(ops.GraphKeys.QUEUE_RUNNERS,
                            proto_type=queue_runner_pb2.QueueRunnerDef,
                            to_proto=QueueRunner.to_proto,
                            from_proto=QueueRunner.from_proto)
|
|
# Natural Language Toolkit: Interface to the Mace4 Model Builder
#
# Author: Dan Garrette <dhgarrette@gmail.com>
# Ewan Klein <ewan@inf.ed.ac.uk>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
A model builder that makes use of the external 'Mace4' package.
"""
import os
import tempfile
from nltk.sem.logic import is_indvar
from nltk.sem import Valuation, LogicParser
from nltk.inference.api import ModelBuilder, BaseModelBuilderCommand
from nltk.inference.prover9 import Prover9CommandParent, Prover9Parent
class MaceCommand(Prover9CommandParent, BaseModelBuilderCommand):
    """
    A ``MaceCommand`` specific to the ``Mace`` model builder.  It contains
    a print_assumptions() method that is used to print the list
    of assumptions in multiple formats.
    """
    # Cached path to the 'interpformat' binary (located lazily on first use).
    _interpformat_bin = None

    def __init__(self, goal=None, assumptions=None, max_models=500, model_builder=None):
        """
        :param goal: Input expression to prove
        :type goal: sem.Expression
        :param assumptions: Input expressions to use as assumptions in
            the proof.
        :type assumptions: list(sem.Expression)
        :param max_models: The maximum number of models that Mace will try before
            simply returning false. (Use 0 for no maximum.)
        :type max_models: int
        :param model_builder: An optional pre-configured ``Mace`` instance;
            when omitted, one is created with ``max_models``.
        :type model_builder: Mace
        """
        if model_builder is not None:
            assert isinstance(model_builder, Mace)
        else:
            model_builder = Mace(max_models)

        BaseModelBuilderCommand.__init__(self, model_builder, goal, assumptions)

    @property
    def valuation(self):
        """The model as an NLTK-style ``Valuation`` (None if no model found)."""
        return self.model('valuation')

    def _convert2val(self, valuation_str):
        """
        Transform the output file into an NLTK-style Valuation.

        :param valuation_str: str with the model builder's raw output
        :return: A model if one is generated; None otherwise.
        :rtype: sem.Valuation
        """
        valuation_standard_format = self._transform_output(valuation_str, 'standard')

        val = []
        # NOTE(review): assumes Mace4's 'standard' output always emits the
        # 'interpretation' line before any 'relation' lines, otherwise
        # num_entities below would be unbound -- confirm against the format.
        for line in valuation_standard_format.splitlines(False):
            l = line.strip()

            if l.startswith('interpretation'):
                # find the number of entities in the model
                num_entities = int(l[l.index('(') + 1:l.index(',')].strip())

            elif l.startswith('function') and l.find('_') == -1:
                # replace the integer identifier with a corresponding alphabetic character
                name = l[l.index('(') + 1:l.index(',')].strip()
                if is_indvar(name):
                    name = name.upper()
                value = int(l[l.index('[') + 1:l.index(']')].strip())
                val.append((name, MaceCommand._make_model_var(value)))

            elif l.startswith('relation'):
                l = l[l.index('(') + 1:]
                if '(' in l:
                    # relation is not nullary
                    name = l[:l.index('(')].strip()
                    values = [int(v.strip()) for v in l[l.index('[') + 1:l.index(']')].split(',')]
                    val.append((name, MaceCommand._make_relation_set(num_entities, values)))
                else:
                    # relation is nullary
                    name = l[:l.index(',')].strip()
                    value = int(l[l.index('[') + 1:l.index(']')].strip())
                    val.append((name, value == 1))

        return Valuation(val)

    @staticmethod
    def _make_relation_set(num_entities, values):
        """
        Convert a Mace4-style relation table into a set of entity tuples.

        :param num_entities: the number of entities in the model; determines the row length in the table.
        :type num_entities: int
        :param values: a list of 1's and 0's that represent whether a relation holds in a Mace4 model.
        :type values: list of int
        """
        r = set()
        for position in [pos for (pos, v) in enumerate(values) if v == 1]:
            r.add(tuple(MaceCommand._make_relation_tuple(position, values, num_entities)))
        return r

    @staticmethod
    def _make_relation_tuple(position, values, num_entities):
        """Recursively decode the entity tuple encoded at ``position``."""
        if len(values) == 1:
            return []
        else:
            # Floor division keeps the index arithmetic correct under both
            # Python 2 and Python 3 (where '/' is true division).
            sublist_size = len(values) // num_entities
            sublist_start = position // sublist_size
            sublist_position = position % sublist_size

            sublist = values[sublist_start * sublist_size:(sublist_start + 1) * sublist_size]
            return [MaceCommand._make_model_var(sublist_start)] + \
                   MaceCommand._make_relation_tuple(sublist_position,
                                                    sublist,
                                                    num_entities)

    @staticmethod
    def _make_model_var(value):
        """
        Pick an alphabetic character as identifier for an entity in the model.

        :param value: where to index into the list of characters
        :type value: int
        """
        # Wrap around the alphabet and append a numeric suffix for values
        # >= 26.  The previous version indexed the letter list with the raw
        # value, which raised IndexError for models with more than 26
        # entities despite the suffix logic below.
        letter = 'abcdefghijklmnopqrstuvwxyz'[value % 26]
        num = value // 26
        return letter + str(num) if num > 0 else letter

    def _decorate_model(self, valuation_str, format):
        """
        Print out a Mace4 model using any Mace4 ``interpformat`` format.
        See http://www.cs.unm.edu/~mccune/mace4/manual/ for details.

        :param valuation_str: str with the model builder's output
        :param format: str indicating the format for displaying
            models. Defaults to 'standard' format.
        :return: str
        """
        if not format:
            return valuation_str
        elif format == 'valuation':
            return self._convert2val(valuation_str)
        else:
            return self._transform_output(valuation_str, format)

    def _transform_output(self, valuation_str, format):
        """
        Transform the output file into any Mace4 ``interpformat`` format.

        :param format: Output format for displaying models.
        :type format: str
        :raise LookupError: if ``format`` is not a known interpformat name.
        """
        if format in ['standard', 'standard2', 'portable', 'tabular',
                      'raw', 'cooked', 'xml', 'tex']:
            return self._call_interpformat(valuation_str, [format])[0]
        else:
            raise LookupError("The specified format does not exist")

    def _call_interpformat(self, input_str, args=None, verbose=False):
        """
        Call the ``interpformat`` binary with the given input.

        :param input_str: A string whose contents are used as stdin.
        :param args: A list of command-line arguments.
        :return: A tuple (stdout, returncode)
        :see: ``config_prover9``
        """
        if args is None:
            # Avoid the shared-mutable-default-argument pitfall.
            args = []
        if self._interpformat_bin is None:
            # Locate the binary once and cache the path.
            self._interpformat_bin = self._modelbuilder._find_binary(
                'interpformat', verbose)

        return self._modelbuilder._call(input_str, self._interpformat_bin,
                                        args, verbose)
class Mace(Prover9Parent, ModelBuilder):
    """Interface to the external Mace4 model-building binary."""
    # Cached path to the 'mace4' binary (located lazily on first use).
    _mace4_bin = None

    def __init__(self, end_size=500):
        """
        :param end_size: The maximum model size that Mace will try before
            simply returning false.  Any value <= 0 disables the limit
            (the old in-code note said "-1", but _call_mace4 only emits the
            assign(end_size, ...) directive when end_size > 0).
        :type end_size: int
        """
        # The original placed this description in a stray string literal
        # *after* the assignment, where it was a no-op statement rather
        # than documentation.
        self._end_size = end_size

    def _build_model(self, goal=None, assumptions=None, verbose=False):
        """
        Use Mace4 to build a first order model.

        :return: ``True`` if a model was found (i.e. Mace returns value of 0),
            else ``False``
        """
        if not assumptions:
            assumptions = []

        stdout, returncode = self._call_mace4(self.prover9_input(goal, assumptions),
                                              verbose=verbose)
        return (returncode == 0, stdout)

    def _call_mace4(self, input_str, args=None, verbose=False):
        """
        Call the ``mace4`` binary with the given input.

        :param input_str: A string whose contents are used as stdin.
        :param args: A list of command-line arguments.
        :return: A tuple (stdout, returncode)
        :see: ``config_prover9``
        """
        if args is None:
            # Avoid the shared-mutable-default-argument pitfall.
            args = []
        if self._mace4_bin is None:
            self._mace4_bin = self._find_binary('mace4', verbose)

        updated_input_str = ''
        if self._end_size > 0:
            # Prepend the model-size cap directive understood by Mace4.
            updated_input_str += 'assign(end_size, %d).\n\n' % self._end_size
        updated_input_str += input_str

        return self._call(updated_input_str, self._mace4_bin, args, verbose)
def spacer(num=30):
    """Print a horizontal rule of ``num`` dashes.

    :param num: width of the rule in characters
    :type num: int
    """
    # Parenthesized single-expression print is identical under Python 2's
    # print statement and valid under Python 3's print function.
    print('-' * num)
def decode_result(found):
    """
    Decode the result of model_found()

    :param found: The output of model_found()
    :type found: bool
    """
    # Map each possible outcome (True / False / None) to its label.
    labels = {True: 'Countermodel found',
              False: 'No countermodel found',
              None: 'None'}
    return labels[found]
def test_model_found(arguments):
    """
    Try some proofs and exhibit the results.

    :param arguments: list of (goal, assumptions) pairs of logic strings
    """
    lp = LogicParser()
    for (goal, assumptions) in arguments:
        g = lp.parse(goal)
        alist = [lp.parse(a) for a in assumptions]
        # Fix: MaceCommand's constructor takes ``max_models``; the previous
        # ``end_size=50`` keyword does not exist and raised a TypeError.
        m = MaceCommand(g, assumptions=alist, max_models=50)
        found = m.build_model()
        for a in alist:
            print(' %s' % a)
        print('|- %s: %s\n' % (g, decode_result(found)))
def test_build_model(arguments):
    """
    Try to build a ``nltk.sem.Valuation``.

    NOTE: the ``arguments`` parameter is unused; the goal and assumptions
    are hard-coded below.
    """
    lp = LogicParser()
    g = lp.parse('all x.man(x)')
    alist = [lp.parse(a) for a in ['man(John)',
                                   'man(Socrates)',
                                   'man(Bill)',
                                   'some x.(-(x = John) & man(x) & sees(John,x))',
                                   'some x.(-(x = Bill) & man(x))',
                                   'all x.some y.(man(x) -> gives(Socrates,x,y))']]

    m = MaceCommand(g, assumptions=alist)
    m.build_model()
    spacer()
    print "Assumptions and Goal"
    spacer()
    for a in alist:
        print ' %s' % a
    # NOTE(review): build_model() is invoked a second time here; presumably
    # the cached result makes this cheap -- confirm before refactoring.
    print '|- %s: %s\n' % (g, decode_result(m.build_model()))
    spacer()
    #print m.model('standard')
    #print m.model('cooked')
    print "Valuation"
    spacer()
    print m.valuation, '\n'
def test_transform_output(argument_pair):
    """
    Transform the model into various Mace4 ``interpformat`` formats.

    :param argument_pair: a (goal, assumptions) tuple where assumptions is a
        list of logic strings
    """
    lp = LogicParser()
    g = lp.parse(argument_pair[0])
    alist = [lp.parse(a) for a in argument_pair[1]]
    m = MaceCommand(g, assumptions=alist)
    m.build_model()
    for a in alist:
        print ' %s' % a
    # NOTE(review): this prints the raw (bool, stdout) pair from the second
    # build_model() call rather than a decoded label -- confirm intended.
    print '|- %s: %s\n' % (g, m.build_model())
    for format in ['standard', 'portable', 'xml', 'cooked']:
        spacer()
        print "Using '%s' format" % format
        spacer()
        print m.model(format=format)
def test_make_relation_set():
    """Smoke-test ``MaceCommand._make_relation_set`` against known tables.

    Each line prints True when the decoded relation set matches the
    expected set of entity tuples.
    """
    # Parenthesized single-expression prints behave identically under the
    # Python 2 print statement and are valid Python 3.
    print(MaceCommand._make_relation_set(num_entities=3, values=[1,0,1]) == set([('c',), ('a',)]))
    print(MaceCommand._make_relation_set(num_entities=3, values=[0,0,0,0,0,0,1,0,0]) == set([('c', 'a')]))
    print(MaceCommand._make_relation_set(num_entities=2, values=[0,0,1,0,0,0,1,0]) == set([('a', 'b', 'a'), ('b', 'b', 'a')]))
# (goal, assumptions) pairs of logic strings fed to the demo functions below.
arguments = [
    ('mortal(Socrates)', ['all x.(man(x) -> mortal(x))', 'man(Socrates)']),
    ('(not mortal(Socrates))', ['all x.(man(x) -> mortal(x))', 'man(Socrates)'])
]
def demo():
    """Run the three Mace demos against the sample ``arguments``."""
    test_model_found(arguments)
    test_build_model(arguments)
    test_transform_output(arguments[1])

if __name__ == '__main__':
    demo()
|
|
import builtins
import os
import select
import socket
import sys
import unittest
import errno
from errno import EEXIST
# Fixture hierarchy for the explicit-subclassing tests below: each class
# exercises a different combination of overridden __init__/__new__.
class SubOSError(OSError):
    # Plain subclass: used to verify errno-based mapping is skipped for
    # direct subclass construction.
    pass

class SubOSErrorWithInit(OSError):
    # Overrides __init__ to add an attribute while forwarding the message.
    def __init__(self, message, bar):
        self.bar = bar
        super().__init__(message)

class SubOSErrorWithNew(OSError):
    # Overrides __new__ to add an attribute while forwarding the message.
    def __new__(cls, message, baz):
        self = super().__new__(cls, message)
        self.baz = baz
        return self

class SubOSErrorCombinedInitFirst(SubOSErrorWithInit, SubOSErrorWithNew):
    # MRO places the __init__ override before the __new__ override.
    pass

class SubOSErrorCombinedNewFirst(SubOSErrorWithNew, SubOSErrorWithInit):
    # MRO places the __new__ override before the __init__ override.
    pass

class SubOSErrorWithStandaloneInit(OSError):
    # __init__ that deliberately does not call the base class (issue #15229).
    def __init__(self):
        pass
class HierarchyTest(unittest.TestCase):
    """Verify the PEP 3151 exception hierarchy and OSError errno mapping."""

    def test_builtin_errors(self):
        # IOError and EnvironmentError collapsed into OSError in Python 3.3.
        self.assertEqual(OSError.__name__, 'OSError')
        self.assertIs(IOError, OSError)
        self.assertIs(EnvironmentError, OSError)

    def test_socket_errors(self):
        self.assertIs(socket.error, IOError)
        for exc_type in (socket.gaierror, socket.herror, socket.timeout):
            self.assertIs(exc_type.__base__, OSError)

    def test_select_error(self):
        self.assertIs(select.error, OSError)

    # mmap.error is tested in test_mmap

    _pep_map = """
        +-- BlockingIOError        EAGAIN, EALREADY, EWOULDBLOCK, EINPROGRESS
        +-- ChildProcessError                                          ECHILD
        +-- ConnectionError
            +-- BrokenPipeError                              EPIPE, ESHUTDOWN
            +-- ConnectionAbortedError                           ECONNABORTED
            +-- ConnectionRefusedError                           ECONNREFUSED
            +-- ConnectionResetError                               ECONNRESET
        +-- FileExistsError                                            EEXIST
        +-- FileNotFoundError                                          ENOENT
        +-- InterruptedError                                            EINTR
        +-- IsADirectoryError                                          EISDIR
        +-- NotADirectoryError                                        ENOTDIR
        +-- PermissionError                                     EACCES, EPERM
        +-- ProcessLookupError                                          ESRCH
        +-- TimeoutError                                            ETIMEDOUT
    """

    def _make_map(s):
        # Parse the PEP 3151 tree above into {errno value: exception class}.
        mapping = {}
        for raw_line in s.splitlines():
            entry = raw_line.strip('+- ')
            if not entry:
                continue
            excname, _, errnames = entry.partition(' ')
            for errname in filter(None, errnames.strip().split(', ')):
                mapping[getattr(errno, errname)] = getattr(builtins, excname)
        return mapping
    _map = _make_map(_pep_map)

    def test_errno_mapping(self):
        # The OSError constructor maps errnos to subclasses.
        self.assertIs(type(OSError(EEXIST, "Bad file descriptor")),
                      FileExistsError)
        # Exhaustively check every mapping listed in the PEP tree.
        for errcode, exc in self._map.items():
            self.assertIs(type(OSError(errcode, "Some message")), exc)
        # Errnos absent from the tree must yield a plain OSError.
        for errcode in set(errno.errorcode) - set(self._map):
            self.assertIs(type(OSError(errcode, "Some message")), OSError)

    def test_try_except(self):
        filename = "some_hopefully_non_existing_file"

        # This checks that try .. except checks the concrete exception
        # (FileNotFoundError) and not the base type specified when
        # PyErr_SetFromErrnoWithFilenameObject was called.
        # (it is therefore deliberate that it doesn't use assertRaises)
        try:
            open(filename)
        except FileNotFoundError:
            pass
        else:
            self.fail("should have raised a FileNotFoundError")

        # Another test for PyErr_SetExcFromWindowsErrWithFilenameObject()
        self.assertFalse(os.path.exists(filename))
        try:
            os.unlink(filename)
        except FileNotFoundError:
            pass
        else:
            self.fail("should have raised a FileNotFoundError")
class AttributesTest(unittest.TestCase):
    """Checks the attributes carried by OSError instances."""

    def test_windows_error(self):
        # The winerror attribute only exists on Windows builds.
        if os.name == "nt":
            self.assertIn('winerror', dir(OSError))
        else:
            self.assertNotIn('winerror', dir(OSError))

    def test_posix_error(self):
        err = OSError(EEXIST, "File already exists", "foo.txt")
        self.assertEqual(err.errno, EEXIST)
        self.assertEqual(err.args[0], EEXIST)
        self.assertEqual(err.strerror, "File already exists")
        self.assertEqual(err.filename, "foo.txt")
        if os.name == "nt":
            self.assertEqual(err.winerror, None)

    @unittest.skipUnless(os.name == "nt", "Windows-specific test")
    def test_errno_translation(self):
        # ERROR_ALREADY_EXISTS (183) -> EEXIST
        err = OSError(0, "File already exists", "foo.txt", 183)
        self.assertEqual(err.winerror, 183)
        self.assertEqual(err.errno, EEXIST)
        self.assertEqual(err.args[0], EEXIST)
        self.assertEqual(err.strerror, "File already exists")
        self.assertEqual(err.filename, "foo.txt")

    def test_blockingioerror(self):
        ctor_args = ("a", "b", "c", "d", "e")
        for count in range(6):
            err = BlockingIOError(*ctor_args[:count])
            # characters_written stays unset unless the third arg is an int.
            with self.assertRaises(AttributeError):
                err.characters_written
        err = BlockingIOError("a", "b", 3)
        self.assertEqual(err.characters_written, 3)
        err.characters_written = 5
        self.assertEqual(err.characters_written, 5)
class ExplicitSubclassingTest(unittest.TestCase):
    """Checks that explicit OSError subclasses bypass errno-based mapping."""

    def test_errno_mapping(self):
        # When constructing an OSError subclass, errno mapping isn't done.
        err = SubOSError(EEXIST, "Bad file descriptor")
        self.assertIs(type(err), SubOSError)

    def test_init_overridden(self):
        err = SubOSErrorWithInit("some message", "baz")
        self.assertEqual(err.bar, "baz")
        self.assertEqual(err.args, ("some message",))

    def test_init_kwdargs(self):
        err = SubOSErrorWithInit("some message", bar="baz")
        self.assertEqual(err.bar, "baz")
        self.assertEqual(err.args, ("some message",))

    def test_new_overridden(self):
        err = SubOSErrorWithNew("some message", "baz")
        self.assertEqual(err.baz, "baz")
        self.assertEqual(err.args, ("some message",))

    def test_new_kwdargs(self):
        err = SubOSErrorWithNew("some message", baz="baz")
        self.assertEqual(err.baz, "baz")
        self.assertEqual(err.args, ("some message",))

    def test_init_new_overridden(self):
        # Both MRO orders must set both attributes and keep args intact.
        for combined_cls in (SubOSErrorCombinedInitFirst,
                             SubOSErrorCombinedNewFirst):
            err = combined_cls("some message", "baz")
            self.assertEqual(err.bar, "baz")
            self.assertEqual(err.baz, "baz")
            self.assertEqual(err.args, ("some message",))

    def test_init_standalone(self):
        # __init__ doesn't propagate to OSError.__init__ (see issue #15229)
        err = SubOSErrorWithStandaloneInit()
        self.assertEqual(err.args, ())
        self.assertEqual(str(err), '')
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
|
import copy
import json
import logging
import traceback
from dart.context.database import db
from dart.context.locator import injectable
from dart.model.action import Action
from dart.model.dataset import Dataset
from dart.model.datastore import Datastore
from dart.model.event import Event
from dart.model.graph import EntityType, Relationship, Edge
from dart.model.subscription import Subscription
from dart.model.trigger import Trigger
from dart.model.workflow import Workflow
from dart.trigger.super import super_trigger
_logger = logging.getLogger(__name__)
@injectable
class GraphEntityResolverService(object):
    def __init__(self, dataset_service, datastore_service, event_service, subscription_service, workflow_service,
                 action_service, trigger_service, trigger_proxy):
        """Wire up the injected per-entity services and build the resolver
        dispatch table used by save_entities.

        Each argument is an injected service (see ``@injectable``) --
        presumably one per entity type; types are not declared here.
        """
        self._dataset_service = dataset_service
        self._datastore_service = datastore_service
        self._event_service = event_service
        self._subscription_service = subscription_service
        self._workflow_service = workflow_service
        self._action_service = action_service
        self._trigger_service = trigger_service
        self._trigger_proxy = trigger_proxy
        # Dispatch table: entity type -> bound method that resolves references
        # and persists an unsaved entity of that type.
        self._resolvers = {
            EntityType.dataset: self._resolve_and_save_dataset,
            EntityType.datastore: self._resolve_and_save_datastore,
            EntityType.event: self._resolve_and_save_event,
            EntityType.subscription: self._resolve_and_save_subscription,
            EntityType.workflow: self._resolve_and_save_workflow,
            EntityType.action: self._resolve_and_save_action,
            EntityType.trigger: self._resolve_and_save_trigger,
        }
    def save_entities(self, entity_map, debug=False):
        """Resolve and persist a graph of unsaved entities in one transaction.

        :type entity_map: dict -- expects an 'unsaved_entities' key mapping
            node ids to entity dicts (schema not visible here; verify against
            callers).
        :param debug: when True, failures return the full traceback instead
            of just the exception message.
        :return: (entities_by_node_id, None) on success,
            (None, error_string) on failure.
        """
        actual_entities_by_node_id = {}
        actual_entities_by_unsaved_id = {}

        # =========================================================================================================
        # todo: do basic validation and bail early if there are issues
        # =========================================================================================================
        pass

        try:
            # Dispatch each unsaved entity to its type-specific resolver; the
            # resolvers flush (but do not commit) so ids become available for
            # dependent entities.
            for node_id in entity_map['unsaved_entities'].keys():
                entity_type = self._entity_type(node_id)
                entity_id = self._entity_id(node_id)
                resolve_and_save_func = self._resolvers[entity_type]
                resolve_and_save_func(entity_id, entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id)

            self.update_related_triggers(entity_map)
            # Single commit: everything above persists atomically.
            db.session.commit()

            # Post-commit hooks are best-effort -- each is individually
            # wrapped so one failure does not abort the others.
            affected_datastore_ids = set()
            # NOTE: iteritems/e.message below are Python-2-only idioms.
            for node_id, entity in actual_entities_by_node_id.iteritems():
                try:
                    entity_type = self._entity_type(node_id)
                    if entity_type == EntityType.datastore:
                        self._datastore_service.handle_datastore_state_change(entity, None, entity.data.state)
                    if entity_type == EntityType.subscription:
                        self._subscription_service.generate_subscription_elements(entity)
                    if entity_type == EntityType.trigger:
                        self._trigger_service.initialize_trigger(entity)
                    if entity_type == EntityType.action:
                        assert isinstance(entity, Action)
                        if entity.data.datastore_id:
                            affected_datastore_ids.add(entity.data.datastore_id)
                except Exception:
                    _logger.error(json.dumps(traceback.format_exc()))

            # Kick the trigger proxy once per affected datastore.
            for datastore_id in affected_datastore_ids:
                try:
                    self._trigger_proxy.try_next_action({'datastore_id':datastore_id})
                except Exception:
                    _logger.error(json.dumps(traceback.format_exc()))

            return actual_entities_by_node_id, None

        except Exception as e:
            # Any failure before commit rolls back the whole batch.
            db.session.rollback()
            if debug:
                return None, json.dumps(traceback.format_exc())
            return None, e.message
def _resolve_and_save_dataset(self, entity_id, entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id):
actual_id, unsaved_id = self._resolve(EntityType.dataset, entity_id, entity_map, actual_entities_by_unsaved_id)
if actual_id:
return actual_id
node_id = self._node_id(EntityType.dataset, unsaved_id)
dataset = Dataset.from_dict(entity_map['unsaved_entities'][node_id])
dataset = self._dataset_service.save_dataset(dataset, commit=False, flush=True)
actual_entities_by_node_id[node_id] = dataset
actual_entities_by_unsaved_id[unsaved_id] = dataset
return dataset.id
def _resolve_and_save_datastore(self, entity_id, entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id):
actual_id, unsaved_id = self._resolve(EntityType.datastore, entity_id, entity_map, actual_entities_by_unsaved_id)
if actual_id:
return actual_id
node_id = self._node_id(EntityType.datastore, unsaved_id)
datastore = Datastore.from_dict(entity_map['unsaved_entities'][node_id])
datastore = self._datastore_service.save_datastore(datastore, commit_and_handle_state_change=False, flush=True)
actual_entities_by_node_id[node_id] = datastore
actual_entities_by_unsaved_id[unsaved_id] = datastore
return datastore.id
def _resolve_and_save_event(self, entity_id, entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id):
actual_id, unsaved_id = self._resolve(EntityType.event, entity_id, entity_map, actual_entities_by_unsaved_id)
if actual_id:
return actual_id
node_id = self._node_id(EntityType.event, unsaved_id)
event = Event.from_dict(entity_map['unsaved_entities'][node_id])
event = self._event_service.save_event(event, commit=False, flush=True)
actual_entities_by_node_id[node_id] = event
actual_entities_by_unsaved_id[unsaved_id] = event
return event.id
def _resolve_and_save_subscription(self, entity_id, entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id):
    """Return a persisted subscription id for entity_id.

    An unsaved subscription's dataset is resolved and saved first, then
    the subscription itself is saved (uncommitted, no generation) and
    memoized in both accumulators.
    """
    resolved_id, pending_id = self._resolve(EntityType.subscription, entity_id, entity_map, actual_entities_by_unsaved_id)
    if resolved_id:
        return resolved_id
    key = self._node_id(EntityType.subscription, pending_id)
    subscription = Subscription.from_dict(entity_map['unsaved_entities'][key])
    assert isinstance(subscription, Subscription)
    # The subscription depends on its dataset, so persist that first.
    subscription.data.dataset_id = self._resolve_and_save_dataset(
        subscription.data.dataset_id, entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id)
    saved = self._subscription_service.save_subscription(subscription, commit_and_generate=False, flush=True)
    actual_entities_by_node_id[key] = saved
    actual_entities_by_unsaved_id[pending_id] = saved
    return saved.id
def _resolve_and_save_workflow(self, entity_id, entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id):
    """Return a persisted workflow id for entity_id.

    An unsaved workflow's datastore is resolved and saved first, then the
    workflow itself is saved (uncommitted) and memoized in both accumulators.
    """
    resolved_id, pending_id = self._resolve(EntityType.workflow, entity_id, entity_map, actual_entities_by_unsaved_id)
    if resolved_id:
        return resolved_id
    key = self._node_id(EntityType.workflow, pending_id)
    workflow = Workflow.from_dict(entity_map['unsaved_entities'][key])
    assert isinstance(workflow, Workflow)
    # The workflow depends on its datastore, so persist that first.
    workflow.data.datastore_id = self._resolve_and_save_datastore(
        workflow.data.datastore_id, entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id)
    saved = self._workflow_service.save_workflow(workflow, commit=False, flush=True)
    actual_entities_by_node_id[key] = saved
    actual_entities_by_unsaved_id[pending_id] = saved
    return saved.id
def _resolve_and_save_action(self, entity_id, entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id):
    """Return a persisted action id for entity_id.

    Every entity the unsaved action references (datastore, workflow, and
    subscription/dataset ids inside its args) is resolved and saved first;
    the action is then saved via the engine determined from its datastore.
    """
    resolved_id, pending_id = self._resolve(EntityType.action, entity_id, entity_map, actual_entities_by_unsaved_id)
    if resolved_id:
        return resolved_id
    key = self._node_id(EntityType.action, pending_id)
    action = Action.from_dict(entity_map['unsaved_entities'][key])
    assert isinstance(action, Action)
    data = action.data
    if data.datastore_id:
        data.datastore_id = self._resolve_and_save_datastore(
            data.datastore_id, entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id)
    if data.workflow_id:
        data.workflow_id = self._resolve_and_save_workflow(
            data.workflow_id, entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id)
    if data.args and data.args.get('subscription_id'):
        data.args['subscription_id'] = self._resolve_and_save_subscription(
            data.args['subscription_id'], entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id)
    if data.args and data.args.get('dataset_id'):
        data.args['dataset_id'] = self._resolve_and_save_dataset(
            data.args['dataset_id'], entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id)
    engine_name, datastore = self._find_engine_name_and_datastore(action)
    saved = self._action_service.save_actions([action], engine_name, datastore, commit=False, flush=True)[0]
    actual_entities_by_node_id[key] = saved
    actual_entities_by_unsaved_id[pending_id] = saved
    return saved.id
def _resolve_and_save_trigger(self, entity_id, entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id):
    """Return a persisted trigger id for entity_id.

    Every entity the unsaved trigger references (workflows in its
    workflow_ids, plus workflow/event/subscription/upstream-trigger ids
    inside its args) is resolved and saved first; the trigger itself is
    then saved (uncommitted, uninitialized) and memoized in both
    accumulators. Recursion handles chains of dependent triggers.
    """
    actual_id, unsaved_id = self._resolve(EntityType.trigger, entity_id, entity_map, actual_entities_by_unsaved_id)
    if actual_id:
        return actual_id
    node_id = self._node_id(EntityType.trigger, unsaved_id)
    trigger = Trigger.from_dict(entity_map['unsaved_entities'][node_id])
    assert isinstance(trigger, Trigger)
    if trigger.data.args and trigger.data.args.get('completed_workflow_id'):
        trigger.data.args['completed_workflow_id'] = self._resolve_and_save_workflow(trigger.data.args['completed_workflow_id'], entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id)
    if trigger.data.args and trigger.data.args.get('event_id'):
        trigger.data.args['event_id'] = self._resolve_and_save_event(trigger.data.args['event_id'], entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id)
    if trigger.data.args and trigger.data.args.get('subscription_id'):
        trigger.data.args['subscription_id'] = self._resolve_and_save_subscription(trigger.data.args['subscription_id'], entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id)
    if trigger.data.workflow_ids:
        wf_ids = set()
        for wf_id in trigger.data.workflow_ids:
            wf_ids.add(self._resolve_and_save_workflow(wf_id, entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id))
        trigger.data.workflow_ids = list(wf_ids)
    # BUG FIX: guard against args being None, consistent with every other
    # args access above (previously this raised AttributeError for a
    # trigger whose data.args was None).
    if trigger.data.args and trigger.data.args.get('completed_trigger_ids'):
        t_ids = set()
        for t_id in trigger.data.args['completed_trigger_ids']:
            t_ids.add(self._resolve_and_save_trigger(t_id, entity_map, actual_entities_by_node_id, actual_entities_by_unsaved_id))
        trigger.data.args['completed_trigger_ids'] = list(t_ids)
    trigger = self._trigger_service.save_trigger(trigger, commit_and_initialize=False, flush=True)
    actual_entities_by_node_id[node_id] = trigger
    actual_entities_by_unsaved_id[unsaved_id] = trigger
    return trigger.id
def update_related_triggers(self, entity_map):
    """Patch already-persisted triggers referenced by the entity map so
    they point at the newly saved related entities.

    Only entries whose entity_type is trigger and whose entity_id is a
    real (non-placeholder) id are touched.
    """
    for entity_data in entity_map.get('related_entity_data', {}).values():
        if entity_data['entity_type'] != EntityType.trigger:
            continue
        # Placeholder (UNSAVED-/PARENT-/CHILD-) references were handled
        # during the save pass; only persisted triggers are updated here.
        if self._is_a_reference(entity_data['entity_id']):
            continue
        edge = Edge.from_dict(entity_data['edge'])
        assert isinstance(edge, Edge)
        if entity_data['relationship'] == Relationship.PARENT:
            # The trigger is the parent of a workflow: append the workflow
            # to the trigger's workflow_ids (deduplicated via set()).
            assert edge.destination_type == EntityType.workflow
            trigger = self._trigger_service.get_trigger(edge.source_id)
            updated_workflow_ids = list(set((trigger.data.workflow_ids or []) + [edge.destination_id]))
            self._trigger_service.update_trigger_workflow_ids(trigger, updated_workflow_ids)
        if entity_data['relationship'] == Relationship.CHILD:
            # The trigger is the child of a "super" trigger: record it in
            # the super trigger's completed_trigger_ids args (deduplicated).
            # NOTE(review): ``super_trigger`` is presumably a module-level
            # trigger-type constant imported elsewhere in this file — confirm.
            assert edge.source_type == EntityType.trigger
            s_trigger = self._trigger_service.get_trigger(edge.destination_id)
            assert s_trigger.data.trigger_type_name == super_trigger.name
            updated_args = copy.deepcopy(s_trigger.data.args)
            updated_args['completed_trigger_ids'] = list(set(
                updated_args['completed_trigger_ids'] + [edge.source_id]
            ))
            self._trigger_service.update_trigger_args(s_trigger, updated_args)
@staticmethod
def _entity_type(node_id):
    """Return the entity-type prefix of a '<type>-<id>' node id."""
    entity_type, _, _ = node_id.partition('-')
    return entity_type
@staticmethod
def _entity_id(node_id):
    """Return the id portion of a '<type>-<id>' node id."""
    parts = node_id.split('-', 1)
    return parts[1]
@staticmethod
def _node_id(entity_type, entity_id):
    """Compose the '<type>-<id>' key used for graph nodes."""
    return '-'.join((entity_type, entity_id))
@staticmethod
def _is_a_reference(entity_id):
    """True when entity_id is a placeholder reference (UNSAVED/PARENT/CHILD)
    rather than a persisted entity id."""
    return any(marker in entity_id for marker in ('UNSAVED-', 'PARENT-', 'CHILD-'))
def _resolve(self, entity_type, entity_id, entity_map, actual_entities_by_unsaved_id):
    """Map entity_id to a (actual_id, unsaved_id) pair.

    Returns (actual_id, None) when the id is already persisted (or was
    saved earlier in this pass), and (None, unsaved_id) when the caller
    still needs to save the entity. PARENT-/CHILD- references are
    followed through the entity map recursively.
    """
    if not self._is_a_reference(entity_id):
        # Already a real, persisted id.
        return entity_id, None
    saved = actual_entities_by_unsaved_id.get(entity_id)
    if saved is not None:
        return saved.id, None
    if entity_id.startswith('UNSAVED-'):
        return None, entity_id
    if entity_id.startswith(('PARENT-', 'CHILD-')):
        related = entity_map['related_entity_data'][self._node_id(entity_type, entity_id)]
        return self._resolve(entity_type, related['entity_id'], entity_map, actual_entities_by_unsaved_id)
    raise Exception('could not resolve entity: %s-%s' % (entity_type, entity_id))
def _find_engine_name_and_datastore(self, action):
    """ :type action: dart.model.action.Action """
    # Prefer the action's own datastore; otherwise reach it via the
    # action's workflow.
    if action.data.datastore_id:
        ds = self._datastore_service.get_datastore(action.data.datastore_id)
        return ds.data.engine_name, ds
    if action.data.workflow_id:
        wf = self._workflow_service.get_workflow(action.data.workflow_id)
        ds = self._datastore_service.get_datastore(wf.data.datastore_id)
        return ds.data.engine_name, ds
    raise Exception('could not find datastore from action :%s' % action.id)
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import heat.api.openstack.v1 as api_v1
from heat.tests import common
class RoutesTest(common.HeatTestCase):
    """Checks that the v1 API mapper routes each (path, HTTP method) pair
    to the expected controller action with the expected match params.

    Route expectations are expressed as tables of
    (path, method, action, controller, params) tuples checked by the
    shared _check_routes helper, instead of repeating the same
    assertRoute call shape dozens of times.
    """

    def assertRoute(self, mapper, path, method, action, controller,
                    params=None):
        """Assert path/method maps to action on controller with exactly
        params as the remaining route variables."""
        params = params or {}
        route = mapper.match(path, {'REQUEST_METHOD': method})
        self.assertIsNotNone(route)
        self.assertEqual(action, route['action'])
        self.assertEqual(
            controller, route['controller'].controller.__class__.__name__)
        # Drop the matched action/controller; everything left must be
        # exactly the expected routing params.
        del route['action']
        del route['controller']
        self.assertEqual(params, route)

    def _check_routes(self, cases):
        # Each case is a (path, method, action, controller, params) tuple.
        for path, method, action, controller, params in cases:
            self.assertRoute(self.m, path, method, action, controller,
                             params)

    def setUp(self):
        super(RoutesTest, self).setUp()
        self.m = api_v1.API({}).map

    def test_template_handling(self):
        self._check_routes([
            ('/aaaa/resource_types', 'GET', 'list_resource_types',
             'StackController', {'tenant_id': 'aaaa'}),
            ('/aaaa/resource_types/test_type', 'GET', 'resource_schema',
             'StackController',
             {'tenant_id': 'aaaa', 'type_name': 'test_type'}),
            ('/aaaa/resource_types/test_type/template', 'GET',
             'generate_template', 'StackController',
             {'tenant_id': 'aaaa', 'type_name': 'test_type'}),
            ('/aaaa/validate', 'POST', 'validate_template',
             'StackController', {'tenant_id': 'aaaa'}),
        ])

    def test_stack_collection(self):
        self._check_routes([
            ('/aaaa/stacks', 'GET', 'index', 'StackController',
             {'tenant_id': 'aaaa'}),
            ('/aaaa/stacks', 'POST', 'create', 'StackController',
             {'tenant_id': 'aaaa'}),
            ('/aaaa/stacks/preview', 'POST', 'preview', 'StackController',
             {'tenant_id': 'aaaa'}),
            ('/aaaa/stacks/detail', 'GET', 'detail', 'StackController',
             {'tenant_id': 'aaaa'}),
        ])

    def test_stack_data(self):
        # ARN-style stack names (containing ':' and '/') must also route
        # to lookup, with the whole ARN captured as the stack name.
        arn = ('arn:openstack:heat::6548ab64fbda49deb188851a3b7d8c8b'
               ':stacks/stack-1411-06/1c5d9bb2-3464-45e2-a728-26dfa4e1d34a')
        self._check_routes([
            ('/aaaa/stacks/teststack', 'GET', 'lookup', 'StackController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack'}),
            ('/aaaa/stacks/' + arn, 'GET', 'lookup', 'StackController',
             {'tenant_id': 'aaaa', 'stack_name': arn}),
            ('/aaaa/stacks/teststack/resources', 'GET', 'lookup',
             'StackController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'path': 'resources'}),
            ('/aaaa/stacks/teststack/events', 'GET', 'lookup',
             'StackController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'path': 'events'}),
            ('/aaaa/stacks/teststack/bbbb', 'GET', 'show',
             'StackController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb'}),
        ])

    def test_stack_snapshot(self):
        self._check_routes([
            ('/aaaa/stacks/teststack/bbbb/snapshots', 'POST', 'snapshot',
             'StackController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb'}),
            ('/aaaa/stacks/teststack/bbbb/snapshots/cccc', 'GET',
             'show_snapshot', 'StackController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb', 'snapshot_id': 'cccc'}),
            ('/aaaa/stacks/teststack/bbbb/snapshots/cccc', 'DELETE',
             'delete_snapshot', 'StackController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb', 'snapshot_id': 'cccc'}),
            ('/aaaa/stacks/teststack/bbbb/snapshots', 'GET',
             'list_snapshots', 'StackController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb'}),
            ('/aaaa/stacks/teststack/bbbb/snapshots/cccc/restore', 'POST',
             'restore_snapshot', 'StackController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb', 'snapshot_id': 'cccc'}),
        ])

    def test_stack_data_template(self):
        self._check_routes([
            ('/aaaa/stacks/teststack/bbbb/template', 'GET', 'template',
             'StackController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb'}),
            ('/aaaa/stacks/teststack/template', 'GET', 'lookup',
             'StackController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'path': 'template'}),
        ])

    def test_stack_post_actions(self):
        self._check_routes([
            ('/aaaa/stacks/teststack/bbbb/actions', 'POST', 'action',
             'ActionController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb'}),
        ])

    def test_stack_post_actions_lookup_redirect(self):
        self._check_routes([
            ('/aaaa/stacks/teststack/actions', 'POST', 'lookup',
             'StackController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'path': 'actions'}),
        ])

    def test_stack_update_delete(self):
        self._check_routes([
            ('/aaaa/stacks/teststack/bbbb', 'PUT', 'update',
             'StackController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb'}),
            ('/aaaa/stacks/teststack/bbbb', 'DELETE', 'delete',
             'StackController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb'}),
        ])

    def test_resources(self):
        self._check_routes([
            ('/aaaa/stacks/teststack/bbbb/resources', 'GET', 'index',
             'ResourceController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb'}),
            ('/aaaa/stacks/teststack/bbbb/resources/cccc', 'GET', 'show',
             'ResourceController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb', 'resource_name': 'cccc'}),
            ('/aaaa/stacks/teststack/bbbb/resources/cccc/metadata', 'GET',
             'metadata', 'ResourceController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb', 'resource_name': 'cccc'}),
            ('/aaaa/stacks/teststack/bbbb/resources/cccc/signal', 'POST',
             'signal', 'ResourceController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb', 'resource_name': 'cccc'}),
        ])

    def test_events(self):
        self._check_routes([
            ('/aaaa/stacks/teststack/bbbb/events', 'GET', 'index',
             'EventController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb'}),
            ('/aaaa/stacks/teststack/bbbb/resources/cccc/events', 'GET',
             'index', 'EventController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb', 'resource_name': 'cccc'}),
            ('/aaaa/stacks/teststack/bbbb/resources/cccc/events/dddd',
             'GET', 'show', 'EventController',
             {'tenant_id': 'aaaa', 'stack_name': 'teststack',
              'stack_id': 'bbbb', 'resource_name': 'cccc',
              'event_id': 'dddd'}),
        ])

    def test_software_configs(self):
        self._check_routes([
            ('/aaaa/software_configs', 'GET', 'index',
             'SoftwareConfigController', {'tenant_id': 'aaaa'}),
            ('/aaaa/software_configs', 'POST', 'create',
             'SoftwareConfigController', {'tenant_id': 'aaaa'}),
            ('/aaaa/software_configs/bbbb', 'GET', 'show',
             'SoftwareConfigController',
             {'tenant_id': 'aaaa', 'config_id': 'bbbb'}),
            ('/aaaa/software_configs/bbbb', 'DELETE', 'delete',
             'SoftwareConfigController',
             {'tenant_id': 'aaaa', 'config_id': 'bbbb'}),
        ])

    def test_software_deployments(self):
        self._check_routes([
            ('/aaaa/software_deployments', 'GET', 'index',
             'SoftwareDeploymentController', {'tenant_id': 'aaaa'}),
            ('/aaaa/software_deployments', 'POST', 'create',
             'SoftwareDeploymentController', {'tenant_id': 'aaaa'}),
            ('/aaaa/software_deployments/bbbb', 'GET', 'show',
             'SoftwareDeploymentController',
             {'tenant_id': 'aaaa', 'deployment_id': 'bbbb'}),
            ('/aaaa/software_deployments/bbbb', 'PUT', 'update',
             'SoftwareDeploymentController',
             {'tenant_id': 'aaaa', 'deployment_id': 'bbbb'}),
            ('/aaaa/software_deployments/bbbb', 'DELETE', 'delete',
             'SoftwareDeploymentController',
             {'tenant_id': 'aaaa', 'deployment_id': 'bbbb'}),
        ])

    def test_build_info(self):
        self._check_routes([
            ('/fake_tenant/build_info', 'GET', 'build_info',
             'BuildInfoController', {'tenant_id': 'fake_tenant'}),
        ])

    def test_405(self):
        # Known paths hit with a disallowed method are rejected with the
        # allowed methods advertised.
        self._check_routes([
            ('/fake_tenant/validate', 'GET', 'reject',
             'DefaultMethodController',
             {'tenant_id': 'fake_tenant', 'allowed_methods': 'POST'}),
            ('/fake_tenant/stacks', 'PUT', 'reject',
             'DefaultMethodController',
             {'tenant_id': 'fake_tenant', 'allowed_methods': 'GET,POST'}),
            ('/fake_tenant/stacks/fake_stack/stack_id', 'POST', 'reject',
             'DefaultMethodController',
             {'tenant_id': 'fake_tenant', 'stack_name': 'fake_stack',
              'stack_id': 'stack_id',
              'allowed_methods': 'GET,PUT,PATCH,DELETE'}),
        ])

    def test_options(self):
        self._check_routes([
            ('/fake_tenant/validate', 'OPTIONS', 'options',
             'DefaultMethodController',
             {'tenant_id': 'fake_tenant', 'allowed_methods': 'POST'}),
            ('/fake_tenant/stacks/fake_stack/stack_id', 'OPTIONS',
             'options', 'DefaultMethodController',
             {'tenant_id': 'fake_tenant', 'stack_name': 'fake_stack',
              'stack_id': 'stack_id',
              'allowed_methods': 'GET,PUT,PATCH,DELETE'}),
        ])

    def test_services(self):
        self._check_routes([
            ('/aaaa/services', 'GET', 'index', 'ServiceController',
             {'tenant_id': 'aaaa'}),
        ])
|
|
import datetime
import time
import warnings
import sys
import pymysql
from pymysql.tests import base
import unittest2
# Python 2/3 compatibility: on Python 3 the builtin ``reload`` is gone, so
# take it from ``imp``; on Python 2 ``imp.reload`` does not exist, the
# AttributeError is swallowed, and the builtin ``reload`` stays in use.
try:
    import imp
    reload = imp.reload
except AttributeError:
    pass

# Test classes exported by this module.
__all__ = ["TestOldIssues", "TestNewIssues", "TestGitHubIssues"]
class TestOldIssues(base.PyMySQLTestCase):
    """Regression tests for issues from the original (pre-GitHub)
    PyMySQL issue tracker. These tests require a live MySQL server
    configured through ``base.PyMySQLTestCase``."""

    def test_issue_3(self):
        """ undefined methods datetime_or_None, date_or_None """
        conn = self.connections[0]
        c = conn.cursor()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            c.execute("drop table if exists issue3")
        c.execute("create table issue3 (d date, t time, dt datetime, ts timestamp)")
        try:
            # NULLs in temporal columns must round-trip as None; the
            # timestamp column is populated by the server, so a datetime
            # instance comes back instead.
            c.execute("insert into issue3 (d, t, dt, ts) values (%s,%s,%s,%s)", (None, None, None, None))
            c.execute("select d from issue3")
            self.assertEqual(None, c.fetchone()[0])
            c.execute("select t from issue3")
            self.assertEqual(None, c.fetchone()[0])
            c.execute("select dt from issue3")
            self.assertEqual(None, c.fetchone()[0])
            c.execute("select ts from issue3")
            self.assertTrue(isinstance(c.fetchone()[0], datetime.datetime))
        finally:
            c.execute("drop table issue3")

    def test_issue_4(self):
        """ can't retrieve TIMESTAMP fields """
        conn = self.connections[0]
        c = conn.cursor()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            c.execute("drop table if exists issue4")
        c.execute("create table issue4 (ts timestamp)")
        try:
            c.execute("insert into issue4 (ts) values (now())")
            c.execute("select ts from issue4")
            self.assertTrue(isinstance(c.fetchone()[0], datetime.datetime))
        finally:
            c.execute("drop table issue4")

    def test_issue_5(self):
        """ query on information_schema.tables fails """
        con = self.connections[0]
        cur = con.cursor()
        cur.execute("select * from information_schema.tables")

    def test_issue_6(self):
        """ exception: TypeError: ord() expected a character, but string of length 0 found """
        # ToDo: this test requires access to db 'mysql'.
        kwargs = self.databases[0].copy()
        kwargs['db'] = "mysql"
        conn = pymysql.connect(**kwargs)
        c = conn.cursor()
        c.execute("select * from user")
        conn.close()

    def test_issue_8(self):
        """ Primary Key and Index error when selecting data """
        conn = self.connections[0]
        c = conn.cursor()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            c.execute("drop table if exists test")
        c.execute("""CREATE TABLE `test` (`station` int(10) NOT NULL DEFAULT '0', `dh`
datetime NOT NULL DEFAULT '2015-01-01 00:00:00', `echeance` int(1) NOT NULL
DEFAULT '0', `me` double DEFAULT NULL, `mo` double DEFAULT NULL, PRIMARY
KEY (`station`,`dh`,`echeance`)) ENGINE=MyISAM DEFAULT CHARSET=latin1;""")
        try:
            # Selecting must work both before and after adding an index.
            self.assertEqual(0, c.execute("SELECT * FROM test"))
            c.execute("ALTER TABLE `test` ADD INDEX `idx_station` (`station`)")
            self.assertEqual(0, c.execute("SELECT * FROM test"))
        finally:
            c.execute("drop table test")

    def test_issue_9(self):
        """ sets DeprecationWarning in Python 2.6 """
        try:
            reload(pymysql)
        except DeprecationWarning:
            self.fail()

    def test_issue_13(self):
        """ can't handle large result fields """
        conn = self.connections[0]
        cur = conn.cursor()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            cur.execute("drop table if exists issue13")
        try:
            cur.execute("create table issue13 (t text)")
            # ticket says 18k
            size = 18*1024
            cur.execute("insert into issue13 (t) values (%s)", ("x" * size,))
            cur.execute("select t from issue13")
            # use assertTrue so that obscenely huge error messages don't print
            r = cur.fetchone()[0]
            self.assertTrue("x" * size == r)
        finally:
            cur.execute("drop table issue13")

    def test_issue_15(self):
        """ query should be expanded before perform character encoding """
        conn = self.connections[0]
        c = conn.cursor()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            c.execute("drop table if exists issue15")
        c.execute("create table issue15 (t varchar(32))")
        try:
            # non-ASCII parameter must survive the encode/expand round trip
            c.execute("insert into issue15 (t) values (%s)", (u'\xe4\xf6\xfc',))
            c.execute("select t from issue15")
            self.assertEqual(u'\xe4\xf6\xfc', c.fetchone()[0])
        finally:
            c.execute("drop table issue15")

    def test_issue_16(self):
        """ Patch for string and tuple escaping """
        conn = self.connections[0]
        c = conn.cursor()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            c.execute("drop table if exists issue16")
        c.execute("create table issue16 (name varchar(32) primary key, email varchar(32))")
        try:
            c.execute("insert into issue16 (name, email) values ('pete', 'floydophone')")
            c.execute("select email from issue16 where name=%s", ("pete",))
            self.assertEqual("floydophone", c.fetchone()[0])
        finally:
            c.execute("drop table issue16")

    @unittest2.skip("test_issue_17() requires a custom, legacy MySQL configuration and will not be run.")
    def test_issue_17(self):
        """could not connect mysql use passwod"""
        conn = self.connections[0]
        host = self.databases[0]["host"]
        db = self.databases[0]["db"]
        c = conn.cursor()
        # grant access to a table to a user with a password
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                c.execute("drop table if exists issue17")
            c.execute("create table issue17 (x varchar(32) primary key)")
            c.execute("insert into issue17 (x) values ('hello, world!')")
            c.execute("grant all privileges on %s.issue17 to 'issue17user'@'%%' identified by '1234'" % db)
            conn.commit()
            # reconnect as the new password-protected user
            conn2 = pymysql.connect(host=host, user="issue17user", passwd="1234", db=db)
            c2 = conn2.cursor()
            c2.execute("select x from issue17")
            self.assertEqual("hello, world!", c2.fetchone()[0])
        finally:
            c.execute("drop table issue17")
class TestNewIssues(base.PyMySQLTestCase):
    """Regression tests for later tracker issues; these require a live
    MySQL server (and for some tests, a second non-superuser account)."""

    def test_issue_34(self):
        # connecting to a closed port must raise OperationalError 2003
        try:
            pymysql.connect(host="localhost", port=1237, user="root")
            self.fail()
        except pymysql.OperationalError as e:
            self.assertEqual(2003, e.args[0])
        except Exception:
            self.fail()

    def test_issue_33(self):
        # non-ASCII table and column data must work with utf8 connections
        conn = pymysql.connect(charset="utf8", **self.databases[0])
        self.safe_create_table(conn, u'hei\xdfe',
                               u'create table hei\xdfe (name varchar(32))')
        c = conn.cursor()
        c.execute(u"insert into hei\xdfe (name) values ('Pi\xdfata')")
        c.execute(u"select name from hei\xdfe")
        self.assertEqual(u"Pi\xdfata", c.fetchone()[0])

    @unittest2.skip("This test requires manual intervention")
    def test_issue_35(self):
        conn = self.connections[0]
        c = conn.cursor()
        print("sudo killall -9 mysqld within the next 10 seconds")
        try:
            c.execute("select sleep(10)")
            self.fail()
        except pymysql.OperationalError as e:
            # 2013: lost connection to MySQL server during query
            self.assertEqual(2013, e.args[0])

    def test_issue_36(self):
        # connection 0 is super user, connection 1 isn't
        conn = self.connections[1]
        c = conn.cursor()
        c.execute("show processlist")
        kill_id = None
        for row in c.fetchall():
            id = row[0]
            info = row[7]
            if info == "show processlist":
                kill_id = id
                break
        self.assertEqual(kill_id, conn.thread_id())
        # now nuke the connection
        self.connections[0].kill(kill_id)
        # make sure this connection has broken
        try:
            c.execute("show tables")
            self.fail()
        except Exception:
            pass
        c.close()
        conn.close()
        # check the process list from the other connection
        try:
            # Wait since Travis-CI sometimes fail this test.
            time.sleep(0.1)
            c = self.connections[0].cursor()
            c.execute("show processlist")
            ids = [row[0] for row in c.fetchall()]
            self.assertFalse(kill_id in ids)
        finally:
            del self.connections[1]

    def test_issue_37(self):
        # user variables: SELECT returns a row, SET returns none
        conn = self.connections[0]
        c = conn.cursor()
        self.assertEqual(1, c.execute("SELECT @foo"))
        self.assertEqual((None,), c.fetchone())
        self.assertEqual(0, c.execute("SET @foo = 'bar'"))
        c.execute("set @foo = 'bar'")

    def test_issue_38(self):
        # large parameter values must survive insertion
        conn = self.connections[0]
        c = conn.cursor()
        datum = "a" * 1024 * 1023  # reduced size for most default mysql installs
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                c.execute("drop table if exists issue38")
            c.execute("create table issue38 (id integer, data mediumblob)")
            c.execute("insert into issue38 values (1, %s)", (datum,))
        finally:
            c.execute("drop table issue38")

    def disabled_test_issue_54(self):
        # deliberately huge WHERE clause; disabled (see method name prefix)
        conn = self.connections[0]
        c = conn.cursor()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            c.execute("drop table if exists issue54")
        big_sql = "select * from issue54 where "
        big_sql += " and ".join("%d=%d" % (i,i) for i in range(0, 100000))
        try:
            c.execute("create table issue54 (id integer primary key)")
            c.execute("insert into issue54 (id) values (7)")
            c.execute(big_sql)
            self.assertEqual(7, c.fetchone()[0])
        finally:
            c.execute("drop table issue54")
class TestGitHubIssues(base.PyMySQLTestCase):
    """Regression tests for issues from the GitHub tracker; these
    require a live MySQL server."""

    def test_issue_66(self):
        """ 'Connection' object has no attribute 'insert_id' """
        conn = self.connections[0]
        c = conn.cursor()
        self.assertEqual(0, conn.insert_id())
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                c.execute("drop table if exists issue66")
            c.execute("create table issue66 (id integer primary key auto_increment, x integer)")
            c.execute("insert into issue66 (x) values (1)")
            c.execute("insert into issue66 (x) values (1)")
            self.assertEqual(2, conn.insert_id())
        finally:
            c.execute("drop table issue66")

    def test_issue_79(self):
        """ Duplicate field overwrites the previous one in the result of DictCursor """
        conn = self.connections[0]
        c = conn.cursor(pymysql.cursors.DictCursor)
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            c.execute("drop table if exists a")
            c.execute("drop table if exists b")
        c.execute("""CREATE TABLE a (id int, value int)""")
        c.execute("""CREATE TABLE b (id int, value int)""")
        a=(1,11)
        b=(1,22)
        try:
            c.execute("insert into a values (%s, %s)", a)
            c.execute("insert into b values (%s, %s)", b)
            c.execute("SELECT * FROM a inner join b on a.id = b.id")
            r = c.fetchall()[0]
            # the second table's duplicate column is exposed as 'b.value'
            self.assertEqual(r['id'], 1)
            self.assertEqual(r['value'], 11)
            self.assertEqual(r['b.value'], 22)
        finally:
            c.execute("drop table a")
            c.execute("drop table b")

    def test_issue_95(self):
        """ Leftover trailing OK packet for "CALL my_sp" queries """
        conn = self.connections[0]
        cur = conn.cursor()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            cur.execute("DROP PROCEDURE IF EXISTS `foo`")
        cur.execute("""CREATE PROCEDURE `foo` ()
BEGIN
SELECT 1;
END""")
        try:
            # the query after CALL must still parse its result correctly
            cur.execute("""CALL foo()""")
            cur.execute("""SELECT 1""")
            self.assertEqual(cur.fetchone()[0], 1)
        finally:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                cur.execute("DROP PROCEDURE IF EXISTS `foo`")

    def test_issue_114(self):
        """ autocommit is not set after reconnecting with ping() """
        conn = pymysql.connect(charset="utf8", **self.databases[0])
        conn.autocommit(False)
        c = conn.cursor()
        c.execute("""select @@autocommit;""")
        self.assertFalse(c.fetchone()[0])
        conn.close()
        # ping() reconnects; the autocommit setting must survive
        conn.ping()
        c.execute("""select @@autocommit;""")
        self.assertFalse(c.fetchone()[0])
        conn.close()
        # Ensure autocommit() is still working
        conn = pymysql.connect(charset="utf8", **self.databases[0])
        c = conn.cursor()
        c.execute("""select @@autocommit;""")
        self.assertFalse(c.fetchone()[0])
        conn.close()
        conn.ping()
        conn.autocommit(True)
        c.execute("""select @@autocommit;""")
        self.assertTrue(c.fetchone()[0])
        conn.close()

    def test_issue_175(self):
        """ The number of fields returned by server is read in wrong way """
        conn = self.connections[0]
        cur = conn.cursor()
        # field counts above 255 exercise the wider length encoding
        for length in (200, 300):
            columns = ', '.join('c{0} integer'.format(i) for i in range(length))
            sql = 'create table test_field_count ({0})'.format(columns)
            try:
                cur.execute(sql)
                cur.execute('select * from test_field_count')
                assert len(cur.description) == length
            finally:
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore")
                    cur.execute('drop table if exists test_field_count')

    def test_issue_321(self):
        """ Test iterable as query argument. """
        conn = pymysql.connect(charset="utf8", **self.databases[0])
        self.safe_create_table(
            conn, "issue321",
            "create table issue321 (value_1 varchar(1), value_2 varchar(1))")
        sql_insert = "insert into issue321 (value_1, value_2) values (%s, %s)"
        sql_dict_insert = ("insert into issue321 (value_1, value_2) "
                           "values (%(value_1)s, %(value_2)s)")
        sql_select = ("select * from issue321 where "
                      "value_1 in %s and value_2=%s")
        # parameters as tuple, list, and dict-of-lists must all expand
        data = [
            [(u"a", ), u"\u0430"],
            [[u"b"], u"\u0430"],
            {"value_1": [[u"c"]], "value_2": u"\u0430"}
        ]
        cur = conn.cursor()
        self.assertEqual(cur.execute(sql_insert, data[0]), 1)
        self.assertEqual(cur.execute(sql_insert, data[1]), 1)
        self.assertEqual(cur.execute(sql_dict_insert, data[2]), 1)
        self.assertEqual(
            cur.execute(sql_select, [(u"a", u"b", u"c"), u"\u0430"]), 3)
        self.assertEqual(cur.fetchone(), (u"a", u"\u0430"))
        self.assertEqual(cur.fetchone(), (u"b", u"\u0430"))
        self.assertEqual(cur.fetchone(), (u"c", u"\u0430"))

    def test_issue_364(self):
        """ Test mixed unicode/binary arguments in executemany. """
        conn = pymysql.connect(charset="utf8", **self.databases[0])
        self.safe_create_table(
            conn, "issue364",
            "create table issue364 (value_1 binary(3), value_2 varchar(3)) "
            "engine=InnoDB default charset=utf8")
        sql = "insert into issue364 (value_1, value_2) values (%s, %s)"
        usql = u"insert into issue364 (value_1, value_2) values (%s, %s)"
        values = [pymysql.Binary(b"\x00\xff\x00"), u"\xe4\xf6\xfc"]
        # test single insert and select
        cur = conn.cursor()
        cur.execute(sql, args=values)
        cur.execute("select * from issue364")
        self.assertEqual(cur.fetchone(), tuple(values))
        # test single insert unicode query
        cur.execute(usql, args=values)
        # test multi insert and select
        cur.executemany(sql, args=(values, values, values))
        cur.execute("select * from issue364")
        for row in cur.fetchall():
            self.assertEqual(row, tuple(values))
        # test multi insert with unicode query
        cur.executemany(usql, args=(values, values, values))

    def test_issue_363(self):
        """ Test binary / geometry types. """
        conn = pymysql.connect(charset="utf8", **self.databases[0])
        self.safe_create_table(
            conn, "issue363",
            "CREATE TABLE issue363 ( "
            "id INTEGER PRIMARY KEY, geom LINESTRING NOT NULL, "
            "SPATIAL KEY geom (geom)) "
            "ENGINE=MyISAM default charset=utf8")
        cur = conn.cursor()
        query = ("INSERT INTO issue363 (id, geom) VALUES"
                 "(1998, GeomFromText('LINESTRING(1.1 1.1,2.2 2.2)'))")
        # From MySQL 5.7, ST_GeomFromText is added and GeomFromText is deprecated.
        if self.mysql_server_is(conn, (5, 7, 0)):
            with self.assertWarns(pymysql.err.Warning) as cm:
                cur.execute(query)
        else:
            cur.execute(query)
        # select WKT
        query = "SELECT AsText(geom) FROM issue363"
        if sys.version_info[0:2] >= (3,2) and self.mysql_server_is(conn, (5, 7, 0)):
            with self.assertWarns(pymysql.err.Warning) as cm:
                cur.execute(query)
        else:
            cur.execute(query)
        row = cur.fetchone()
        self.assertEqual(row, ("LINESTRING(1.1 1.1,2.2 2.2)", ))
        # select WKB
        query = "SELECT AsBinary(geom) FROM issue363"
        if sys.version_info[0:2] >= (3,2) and self.mysql_server_is(conn, (5, 7, 0)):
            with self.assertWarns(pymysql.err.Warning) as cm:
                cur.execute(query)
        else:
            cur.execute(query)
        row = cur.fetchone()
        self.assertEqual(row,
                         (b"\x01\x02\x00\x00\x00\x02\x00\x00\x00"
                          b"\x9a\x99\x99\x99\x99\x99\xf1?"
                          b"\x9a\x99\x99\x99\x99\x99\xf1?"
                          b"\x9a\x99\x99\x99\x99\x99\x01@"
                          b"\x9a\x99\x99\x99\x99\x99\x01@", ))
        # select internal binary
        cur.execute("SELECT geom FROM issue363")
        row = cur.fetchone()
        # don't assert the exact internal binary value, as it could
        # vary across implementations
        self.assertTrue(isinstance(row[0], bytes))
|
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Events module views
"""
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.utils.html import strip_tags
from django.db.models import Q
from treeio.core.rendering import render_to_response
from treeio.core.models import Object
from treeio.core.views import user_denied
from treeio.core.decorators import treeio_login_required, handle_response_format
from treeio.events.models import Event
from treeio.events.forms import EventForm, GoToDateForm, FilterForm, MassActionForm
from treeio.events.rendering import EventCollection
from datetime import datetime
from dateutil.relativedelta import relativedelta
import calendar
# Visible hour range (inclusive) for the week/day calendar grids; passed to
# EventCollection and used to build the hour-slot range in the views below.
START_HOUR = 6
END_HOUR = 23
def _get_default_context(request):
    "Returns default context as a dict()"
    profile = request.user.profile
    return {'massform': MassActionForm(profile)}
def _get_filter_query(args):
    "Creates a query to filter Events based on FilterForm arguments"
    # Both bounds must be present and non-empty, otherwise no filtering.
    if not (args.get('datefrom') and args.get('dateto')):
        return Q()
    lower = datetime.strptime(args['datefrom'], '%m/%d/%Y').date()
    upper_day = datetime.strptime(args['dateto'], '%m/%d/%Y').date()
    # Extend the upper bound to the last second of that day.
    upper = datetime(year=upper_day.year, month=upper_day.month,
                     day=upper_day.day, hour=23, minute=59, second=59)
    # Keep events that have not ended before the lower bound and that either
    # have no start or start before the upper bound.
    return Q(end__gte=lower) & Q(Q(start__isnull=True) | Q(start__lte=upper))
def _process_mass_form(f):
"Pre-process request to handle mass action form for Events"
def wrap(request, *args, **kwargs):
"Wrap"
user = request.user.profile
if 'massform' in request.POST:
for key in request.POST:
if 'mass-event' in key:
try:
event = Event.objects.get(pk=request.POST[key])
form = MassActionForm(
user, request.POST, instance=event)
if form.is_valid() and user.has_permission(event, mode='w'):
form.save()
except Exception:
pass
return f(request, *args, **kwargs)
wrap.__doc__ = f.__doc__
wrap.__name__ = f.__name__
return wrap
@treeio_login_required
@handle_response_format
@_process_mass_form
def index(request, response_format='html'):
    "Index page: display all events"
    query = Q()
    if request.GET:
        filters = FilterForm(request.GET)
        if filters.is_valid():
            query = _get_filter_query(request.GET)
    else:
        filters = FilterForm()
    events = Object.filter_by_request(request, Event.objects.filter(query))
    context = _get_default_context(request)
    context.update({'events': events, 'filters': filters})
    return render_to_response('events/index', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@treeio_login_required
@handle_response_format
@_process_mass_form
def upcoming(request, response_format='html'):
    "Upcoming Events"
    now = datetime.now()
    # Events that either start or end in the future, most recent end first.
    not_finished = Q(start__gte=now) | Q(end__gte=now)
    events = Object.filter_by_request(
        request, Event.objects.filter(not_finished).order_by('-end'))
    context = _get_default_context(request)
    context.update({'events': events})
    return render_to_response('events/upcoming', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
#
# Calendar View
#
@treeio_login_required
@handle_response_format
def month_view(request, response_format='html'):
    "Month view - each cell represents a day"
    events = Object.filter_by_request(request, Event.objects)
    date_current = now = datetime.now()
    istoday = True
    gotoform = GoToDateForm(now, request.GET)
    if request.GET:
        # Explicit ?date_year=&date_month= parameters are applied first;
        # a valid 'goto' date from the form then takes precedence.
        if 'date_year' in request.GET and 'date_month' in request.GET:
            try:
                year = int(request.GET['date_year'])
                month = int(request.GET['date_month'])
                if year >= 1900 and month >= 1 and month <= 12:
                    date_current = datetime(year, month, 1)
                    istoday = date_current == now
            except Exception:
                # Malformed parameters silently fall back to today.
                pass
        if gotoform.is_valid() and gotoform.cleaned_data['goto']:
            date_current = gotoform.cleaned_data['goto']
            istoday = date_current == now
    # NOTE(review): istoday compares full datetimes (a midnight value against
    # the current time), so it is effectively False whenever a date was
    # supplied — confirm whether a date-only comparison was intended.
    # 'now' is rebound to midnight of the displayed date for the template.
    now = datetime(
        date_current.year, date_current.month, date_current.day)
    dates = calendar.Calendar().monthdatescalendar(
        date_current.year, date_current.month)
    date_previous = date_current - relativedelta(months=+1)
    date_next = date_current + relativedelta(months=+1)
    wrapped_events = EventCollection(events)
    wrapped_events.collect_events(request)
    return render_to_response('events/month_view',
                              {'events': wrapped_events,
                               'dates': dates,
                               'date_previous': date_previous,
                               'date_next': date_next,
                               'date_current': date_current,
                               'gotoform': gotoform.as_ul(),
                               'istoday': istoday,
                               'now': now},
                              context_instance=RequestContext(request), response_format=response_format)
@treeio_login_required
@handle_response_format
def week_view(request, response_format='html'):
    "Week view - each slot represents an hour"
    events = Object.filter_by_request(request, Event.objects)
    date_current = now = datetime.now()
    istoday = True
    gotoform = GoToDateForm(now, request.GET)
    if request.GET:
        # Explicit date parameters first, then a valid 'goto' date wins.
        if 'date_year' in request.GET and 'date_month' in request.GET and 'date_day' in request.GET:
            try:
                day = int(request.GET['date_day'])
                year = int(request.GET['date_year'])
                month = int(request.GET['date_month'])
                if year >= 1900 and month >= 1 and month <= 12 and day >= 1 and day <= 31:
                    date_current = datetime(year, month, day)
                    istoday = date_current == now
            except Exception:
                # Malformed parameters silently fall back to today.
                pass
        if gotoform.is_valid() and gotoform.cleaned_data['goto']:
            date_current = gotoform.cleaned_data['goto']
            istoday = date_current == now
    # Normalise both to midnight of the selected day.
    date_current = now = datetime(
        date_current.year, date_current.month, date_current.day)
    date_previous = date_current - relativedelta(weeks=+1)
    date_next = date_current + relativedelta(weeks=+1)
    # monthdatescalendar returns whole weeks; pick the one containing the day.
    weeks = calendar.Calendar().monthdatescalendar(
        date_current.year, date_current.month)
    current_week = []
    for week in weeks:
        if date_current.date() in week:
            current_week = week
            break
    wrapped_events = EventCollection(events, START_HOUR, END_HOUR)
    wrapped_events.collect_events(request)
    hours = range(START_HOUR, END_HOUR + 1)
    return render_to_response('events/week_view',
                              {'events': wrapped_events,
                               'week': current_week,
                               'start_date': current_week[0],
                               'end_date': current_week[6],
                               'date_previous': date_previous,
                               'date_next': date_next,
                               'date_current': date_current,
                               'gotoform': gotoform.as_ul(),
                               'istoday': istoday,
                               'hours': hours,
                               'now': now},
                              context_instance=RequestContext(request), response_format=response_format)
@treeio_login_required
@handle_response_format
def day_view(request, response_format='html'):
    "Day view - each slot represents an hour"
    events = Object.filter_by_request(request, Event.objects)
    date_current = now = datetime.now()
    istoday = True
    gotoform = GoToDateForm(now, request.GET)
    if request.GET:
        # Explicit date parameters first, then a valid 'goto' date wins.
        if 'date_year' in request.GET and 'date_month' in request.GET and 'date_day' in request.GET:
            try:
                day = int(request.GET['date_day'])
                year = int(request.GET['date_year'])
                month = int(request.GET['date_month'])
                if year >= 1900 and month >= 1 and month <= 12 and day >= 1 and day <= 31:
                    date_current = datetime(year, month, day)
                    istoday = date_current == now
            except Exception:
                # Malformed parameters silently fall back to today.
                pass
        if gotoform.is_valid() and gotoform.cleaned_data['goto']:
            date_current = gotoform.cleaned_data['goto']
            istoday = date_current == now
    # Normalise both to midnight of the selected day.
    date_current = now = datetime(
        date_current.year, date_current.month, date_current.day)
    day = date_current.date()
    date_previous = date_current - relativedelta(days=+1)
    date_next = date_current + relativedelta(days=+1)
    wrapped_events = EventCollection(events, START_HOUR, END_HOUR)
    wrapped_events.collect_events(request)
    hours = range(START_HOUR, END_HOUR + 1)
    return render_to_response('events/day_view',
                              {'events': wrapped_events,
                               'day': day,
                               'hours': hours,
                               'date_previous': date_previous,
                               'date_next': date_next,
                               'date_current': date_current,
                               'gotoform': gotoform.as_ul(),
                               'istoday': istoday,
                               'now': now},
                              context_instance=RequestContext(request), response_format=response_format)
#
# Events
#
@treeio_login_required
@handle_response_format
def event_view(request, event_id, response_format='html'):
    "Event view"
    event = get_object_or_404(Event, pk=event_id)
    profile = request.user.profile
    if not profile.has_permission(event):
        return user_denied(request, message="You don't have access to this Event")
    return render_to_response('events/event_view', {'event': event},
                              context_instance=RequestContext(request),
                              response_format=response_format)
@treeio_login_required
@handle_response_format
def event_edit(request, event_id, response_format='html'):
    "Event edit"
    event = get_object_or_404(Event, pk=event_id)
    profile = request.user.profile
    if not profile.has_permission(event, mode='w'):
        return user_denied(request, message="You don't have access to this Event")
    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('events'))
        form = EventForm(profile, None, None, request.POST, instance=event)
        if form.is_valid():
            event = form.save()
            return HttpResponseRedirect(
                reverse('events_event_view', args=[event.id]))
        # Invalid submission: fall through and re-render with the bound form.
    else:
        form = EventForm(profile, instance=event)
    return render_to_response('events/event_edit',
                              {'event': event, 'form': form},
                              context_instance=RequestContext(request),
                              response_format=response_format)
@treeio_login_required
@handle_response_format
def event_delete(request, event_id, response_format='html'):
    "Event delete"
    event = get_object_or_404(Event, pk=event_id)
    if not request.user.profile.has_permission(event, mode='w'):
        return user_denied(request, message="You don't have access to this Event")
    if request.POST:
        if 'delete' in request.POST:
            # 'trash' soft-deletes; otherwise remove permanently.
            if 'trash' in request.POST:
                event.trash = True
                event.save()
            else:
                event.delete()
            return HttpResponseRedirect(reverse('events_index'))
        if 'cancel' in request.POST:
            return HttpResponseRedirect(
                reverse('events_event_view', args=[event.id]))
    return render_to_response('events/event_delete', {'event': event},
                              context_instance=RequestContext(request),
                              response_format=response_format)
@treeio_login_required
@handle_response_format
def event_add(request, date=None, hour=12, response_format='html'):
    "Event add form"
    profile = request.user.profile
    if not request.POST:
        form = EventForm(profile, date, hour)
    elif 'cancel' in request.POST:
        return HttpResponseRedirect(reverse('events'))
    else:
        form = EventForm(profile, date, hour, request.POST, instance=Event())
        if form.is_valid():
            event = form.save()
            event.set_user_from_request(request)
            return HttpResponseRedirect(
                reverse('events_event_view', args=[event.id]))
        # Invalid submission: fall through and re-render with the bound form.
    return render_to_response('events/event_add', {'form': form},
                              context_instance=RequestContext(request),
                              response_format=response_format)
@treeio_login_required
def ical_all_event(request, response_format='ical'):
    """Export all events visible to the user as an iCalendar stream.

    Builds the VCALENDAR as a list of fragments joined once at the end
    (the previous per-event string concatenation was quadratic).
    """
    events = Object.filter_by_request(request, Event.objects.filter(Q()))

    def _ical_date(value):
        # Validate the leading 'YYYY-MM-DD' of the value via strptime and
        # normalise it into iCal's compact 'YYYYMMDD' form.
        return str(datetime.strptime(
            str(value)[0:10], '%Y-%m-%d'))[0:10].replace("-", "")

    # Calendar lines must start at column 0 — do not indent this literal.
    parts = ["""BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:PUBLISH
PRODID:-//PYVOBJECT//NONSGML Version 1//EN
"""]
    for event in events:
        parts.append("BEGIN:VEVENT\n")
        if event.start:
            parts.append("DTSTART;VALUE=DATE:%s\n" % _ical_date(event.start))
            parts.append("DTEND;VALUE=DATE:%s\n" % _ical_date(event.end))
        # Prefer the free-text details as the summary, fall back to the name.
        summary = event.details if event.details else event.name
        parts.append("SUMMARY:%s\n" % strip_tags(summary))
        parts.append("UID:%s\n" % (event.name))
        parts.append("END:VEVENT\n")
    parts.append("""X-WR-CALDESC:Tree.io Calendar
X-WR-CALNAME:Tree.io
X-WR-TIMEZONE:London/UK
END:VCALENDAR
""")
    icalstream = "".join(parts)
    response = HttpResponse(icalstream, mimetype='text/calendar')
    response['Filename'] = 'events.ics'  # IE needs this
    response['Content-Disposition'] = 'attachment; filename=events.ics'
    return response
#
# Widgets
#
@handle_response_format
@treeio_login_required
def widget_week_view(request, response_format='html'):
    """Week view widget - each slot represents an hour.

    Fix: the original built and populated an hour-bounded
    EventCollection(events, START_HOUR, END_HOUR) and then immediately
    discarded it by rebinding wrapped_events to EventCollection(events);
    only the latter reaches the template, so the dead work is removed.
    It also computed monthdatescalendar() twice ('weeks' and 'dates').
    """
    events = Object.filter_by_request(request, Event.objects)
    date_current = now = datetime.now()
    istoday = True
    gotoform = GoToDateForm(now, request.GET)
    if request.GET:
        # Explicit date parameters first, then a valid 'goto' date wins.
        if 'date_year' in request.GET and 'date_month' in request.GET and 'date_day' in request.GET:
            try:
                day = int(request.GET['date_day'])
                year = int(request.GET['date_year'])
                month = int(request.GET['date_month'])
                if year >= 1900 and 1 <= month <= 12 and 1 <= day <= 31:
                    date_current = datetime(year, month, day)
                    istoday = date_current == now
            except Exception:
                # Malformed parameters silently fall back to today.
                pass
        if gotoform.is_valid() and gotoform.cleaned_data['goto']:
            date_current = gotoform.cleaned_data['goto']
            istoday = date_current == now
    # Normalise both to midnight of the selected day.
    date_current = now = datetime(
        date_current.year, date_current.month, date_current.day)
    date_previous = date_current - relativedelta(weeks=+1)
    date_next = date_current + relativedelta(weeks=+1)
    # Whole weeks of the displayed month; also reused as the template's dates.
    dates = calendar.Calendar().monthdatescalendar(
        date_current.year, date_current.month)
    current_week = []
    for week in dates:
        if date_current.date() in week:
            current_week = week
            break
    wrapped_events = EventCollection(events)
    wrapped_events.collect_events(request)
    return render_to_response('events/widgets/week_view',
                              {'events': wrapped_events,
                               'dates': dates,
                               'week': current_week,
                               'date_previous': date_previous,
                               'date_next': date_next,
                               'start_date': current_week[0],
                               'end_date': current_week[6],
                               'date_current': date_current,
                               'gotoform': gotoform.as_ul(),
                               'istoday': istoday,
                               'now': now},
                              context_instance=RequestContext(request), response_format=response_format)
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from mcfw.properties import bool_property, unicode_property, long_property, typed_property, unicode_list_property, \
long_list_property, float_property
from rogerthat.models.properties.friend import FriendDetailTO
from rogerthat.to import TO
from rogerthat.utils.app import get_human_user_from_app_user
class AppInfoTO(object):
    # Store/branding info of an App model (see fromModel for the mapping).
    id = unicode_property('0')
    name = unicode_property('1')
    ios_appstore_url = unicode_property('2')
    android_playstore_url = unicode_property('3')
    @staticmethod
    def fromModel(model):
        """Build an AppInfoTO from an App model.

        Reads app_id, name, ios_appstore_web_uri and
        android_market_android_uri from the model.
        """
        app = AppInfoTO()
        app.id = model.app_id
        app.name = model.name
        app.ios_appstore_url = model.ios_appstore_web_uri
        app.android_playstore_url = model.android_market_android_uri
        return app
class AppQRTemplateTO(object):
    """Transfer object for a QR-code template (see from_model)."""
    key_name = unicode_property('1')
    is_default = bool_property('2')
    description = unicode_property('3')
    body_color = unicode_property('4')  # 'RRGGBB' hex string

    def __init__(self, key_name=None, is_default=False, description=None, body_color=None):
        self.key_name = key_name
        self.is_default = is_default
        self.description = description
        self.body_color = body_color

    @classmethod
    def from_model(cls, model, is_default=False):
        """
        Args:
            model (rogerthat.models.QRTemplate)
            is_default (bool)
        """
        # model.body_color is a sequence of integer components; format each
        # as zero-padded uppercase hex ('%02X' replaces the previous
        # ('%X' % c).rjust(2, '0') spelling — identical output).
        rgb = u''.join(['%02X' % c for c in model.body_color])
        return cls(model.key().name(), is_default, model.description, rgb)
class CreateAppQRTemplateTO(AppQRTemplateTO):
    # AppQRTemplateTO fields plus the uploaded template image
    # (presumably encoded file contents — TODO confirm against the handler).
    file = unicode_property('5')
class PatchAppTO(TO):
    # Partial-update payload for an App.
    title = unicode_property('title')
    app_type = long_property('app_type')
    playstore_track = unicode_property('playstore_track')
    main_service = unicode_property('main_service')
    secure = bool_property('secure')
    facebook_registration = bool_property('facebook_registration')
    facebook_app_id = long_property('facebook_app_id')
    facebook_app_secret = unicode_property('facebook_app_secret')
    chat_payments_enabled = bool_property('chat_payments_enabled')
class AppTO(TO):
    """Full transfer object for an App model (see from_model)."""
    id = unicode_property('id')
    name = unicode_property('name')
    type = long_property('type')
    core_branding_hash = unicode_property('core_branding_hash')
    facebook_app_id = long_property('facebook_app_id')
    facebook_app_secret = unicode_property('facebook_app_secret')
    ios_app_id = unicode_property('ios_app_id')
    android_app_id = unicode_property('android_app_id')
    creation_time = long_property('creation_time')
    is_default = bool_property('is_default')
    user_regex = unicode_property('user_regex')
    dashboard_email_address = unicode_property('dashboard_email_address')
    admin_services = unicode_list_property('admin_services')
    demo = bool_property('demo')
    beta = bool_property('beta')
    mdp_client_id = unicode_property('mdp_client_id')
    mdp_client_secret = unicode_property('mdp_client_secret')
    contact_email_address = unicode_property('contact_email_address')
    secure = bool_property('secure')
    owncloud_base_uri = unicode_property('owncloud_base_uri')
    owncloud_admin_username = unicode_property('owncloud_admin_username')
    owncloud_admin_password = unicode_property('owncloud_admin_password')
    main_service = unicode_property('main_service')
    default_app_name_mapping = unicode_property('default_app_name_mapping')
    country = unicode_property('country')
    community_ids = long_list_property('community_ids')
    service_filter_type = long_property('service_filter_type')
    can_choose_home_screen = bool_property('can_choose_home_screen')
    @classmethod
    def from_model(cls, model):
        # type: (rogerthat.models.App) -> AppTO
        # Straight 1:1 copy of the serialized fields from the model.
        app = cls()
        app.id = model.app_id
        app.name = model.name
        app.type = model.type
        app.main_service = model.main_service
        app.core_branding_hash = model.core_branding_hash
        app.facebook_app_id = model.facebook_app_id
        app.facebook_app_secret = model.facebook_app_secret
        app.ios_app_id = model.ios_app_id
        app.android_app_id = model.android_app_id
        app.creation_time = model.creation_time
        app.is_default = model.is_default
        app.user_regex = model.user_regex
        app.dashboard_email_address = model.dashboard_email_address
        app.admin_services = model.admin_services
        app.demo = model.demo
        app.beta = model.beta
        app.secure = model.secure
        app.mdp_client_id = model.mdp_client_id
        app.mdp_client_secret = model.mdp_client_secret
        app.contact_email_address = model.contact_email_address
        app.owncloud_base_uri = model.owncloud_base_uri
        app.owncloud_admin_username = model.owncloud_admin_username
        app.owncloud_admin_password = model.owncloud_admin_password
        app.default_app_name_mapping = model.default_app_name_mapping
        app.country = model.country
        app.community_ids = model.community_ids
        app.service_filter_type = model.service_filter_type
        app.can_choose_home_screen = model.can_choose_home_screen
        return app
class CreateAppTO(TO):
    # Payload for creating a new App.
    # NOTE(review): the property ids mix positional ('1', '4') and named
    # keys — looks like a leftover from an older wire format; confirm the
    # on-the-wire contract before changing.
    app_id = unicode_property('1')
    title = unicode_property('title')
    app_type = long_property('app_type')
    dashboard_email_address = unicode_property('4')
    main_language = unicode_property('main_language')
    country = unicode_property('country')
    official_id = long_property('official_id')
    ios_developer_account = long_property('ios_developer_account')
    review_notes = long_property('review_notes')
class AppUserRelationTO(object):
    # One friend/connection of an app user.
    email = unicode_property('1')
    name = unicode_property('2')
    type = unicode_property('3')  # human / application
    def __init__(self, email, name, type_):
        self.email = email
        self.name = name
        self.type = type_
class AppUserTO(object):
    # An app user together with his active friend relations.
    email = unicode_property('1')
    name = unicode_property('2')
    relations = typed_property('3', AppUserRelationTO, True)
    def __init__(self, user_profile, friendMap):
        # Strip the app suffix from the app user to get the human address.
        self.email = get_human_user_from_app_user(user_profile.user).email()
        self.name = user_profile.name
        self.relations = list()
        if friendMap:
            # Only relations whose existence is ACTIVE are exposed.
            for f in friendMap.get_friend_details().values():
                if f.existence != FriendDetailTO.FRIEND_EXISTENCE_ACTIVE:
                    continue
                self.relations.append(AppUserRelationTO(f.email, f.name,
                                                        u"human" if f.type == FriendDetailTO.TYPE_USER else u"application"))
class AppUserListResultTO(object):
    # One page of AppUserTO results; cursor continues the listing.
    cursor = unicode_property('1')
    users = typed_property('2', AppUserTO, True)
class AppSettingsTO(object):
    # Serializable view of the AppSettings model (see from_model).
    wifi_only_downloads = bool_property('wifi_only_downloads')
    background_fetch_timestamps = long_list_property('background_fetch_timestamps')
    birthday_message_enabled = bool_property('birthday_message_enabled')
    birthday_message = unicode_property('birthday_message')
    tos_enabled = bool_property('tos_enabled')
    ios_firebase_project_id = unicode_property('ios_firebase_project_id')
    ios_apns_key_id = unicode_property('ios_apns_key_id')
    def __init__(self, wifi_only_downloads=None, background_fetch_timestamps=None,
                 birthday_message_enabled=False, birthday_message=None, tos_enabled=True,
                 ios_firebase_project_id=None, ios_apns_key_id=None):
        # None sentinel avoids a shared mutable default for the list argument.
        if background_fetch_timestamps is None:
            background_fetch_timestamps = []
        self.wifi_only_downloads = wifi_only_downloads
        self.background_fetch_timestamps = background_fetch_timestamps
        self.birthday_message_enabled = birthday_message_enabled
        self.birthday_message = birthday_message
        self.tos_enabled = tos_enabled
        self.ios_firebase_project_id = ios_firebase_project_id
        self.ios_apns_key_id = ios_apns_key_id
    @classmethod
    def from_model(cls, model, ios_apns_key_id=None):
        """
        Args:
            model (rogerthat.models.AppSettings)
            ios_apns_key_id (unicode): supplied separately from the model.
        """
        return cls(model.wifi_only_downloads, model.background_fetch_timestamps,
                   model.birthday_message_enabled, model.birthday_message, model.tos_enabled,
                   model.ios_firebase_project_id, ios_apns_key_id)
# This object is sent to the phones
class AppAssetTO(object):
    kind = unicode_property('1')
    url = unicode_property('2')
    scale_x = float_property('3')
    def __init__(self, kind=None, url=None, scale_x=0.0):
        self.kind = kind
        self.url = url
        self.scale_x = scale_x
# This object is used for managing app assets
class AppAssetFullTO(AppAssetTO):
    id = unicode_property('9')
    app_ids = unicode_list_property('10')
    content_type = unicode_property('11')
    is_default = bool_property('12')
    def __init__(self, key=None, kind=None, url=None, scale_x=None, app_ids=None, uploaded_on=None, modified_on=None,
                 content_type=None, is_default=False):
        super(AppAssetFullTO, self).__init__(kind, url, scale_x)
        self.id = unicode(key)
        self.app_ids = app_ids
        # NOTE(review): uploaded_on / modified_on are plain attributes, not
        # declared as serialized properties like the fields above — confirm
        # whether they are meant to go over the wire.
        self.uploaded_on = uploaded_on
        self.modified_on = modified_on
        self.content_type = content_type
        self.is_default = is_default
    @classmethod
    def from_model(cls, asset):
        """
        Args:
            asset (rogerthat.models.apps.AppAsset)
        """
        return cls(asset.key.id(), asset.asset_type, asset.serving_url, asset.scale_x, asset.app_ids, asset.uploaded_on,
                   asset.modified_on, asset.content_type, asset.is_default)
class DefaultBrandingTO(object):
    # Transfer object for the DefaultBranding model (see from_model).
    id = unicode_property('1')
    branding = unicode_property('2')
    app_ids = unicode_list_property('3')
    branding_type = unicode_property('4')
    is_default = bool_property('5')
    def __init__(self, key=None, branding=None, app_ids=None, branding_type=None, is_default=False):
        # Key is coerced to unicode so it serializes consistently.
        self.id = unicode(key)
        self.branding = branding
        self.app_ids = app_ids
        self.branding_type = branding_type
        self.is_default = is_default
    @classmethod
    def from_model(cls, model):
        """
        Args:
            model (rogerthat.models.apps.DefaultBranding)
        """
        return cls(model.key.id(), model.branding, model.app_ids, model.branding_type, model.is_default)
class PutLoyaltyUserResultTO(object):
    # Result payload: url, email and app_id of the registered loyalty user.
    url = unicode_property('1')
    email = unicode_property('2')
    app_id = unicode_property('3')
class GetAppAssetRequestTO(object):
    # Request: the asset kind to fetch (see AppAssetTO.kind).
    kind = unicode_property('1')
class GetAppAssetResponseTO(AppAssetTO):
    pass
class UpdateAppAssetRequestTO(AppAssetTO):
    pass
class UpdateAppAssetResponseTO(object):
    # Empty acknowledgement payload.
    pass
class AppTranslationTO(object):
    # A single translation entry (key -> translated value).
    key = unicode_property('key')
    value = unicode_property('value')
    def __init__(self, key, value):
        self.key = key
        self.value = value
class CreateEmbeddedApplicationTO(TO):
    # Payload for creating an embedded application; 'file' carries the
    # uploaded content (presumably encoded — TODO confirm against handler).
    name = unicode_property('name')
    file = unicode_property('file')
    tags = unicode_list_property('tags')
    url_regexes = unicode_list_property('url_regexes', default=[])
    title = unicode_property('title')
    description = unicode_property('description')
    types = unicode_list_property('types')
    app_types = long_list_property('app_types', default=[])
class UpdateEmbeddedApplicationTO(CreateEmbeddedApplicationTO):
    pass
# For requests to the app
class EmbeddedAppTO(TO):
    name = unicode_property('name')
    title = unicode_property('title')
    description = unicode_property('description')
    types = unicode_list_property('types', default=[])
    serving_url = unicode_property('serving_url')
    version = long_property('version')
    url_regexes = unicode_list_property('url_regexes', default=[])
    @classmethod
    def from_model(cls, model):
        # Relies on TO.from_dict picking the declared properties out of the
        # model's dict representation.
        return cls.from_dict(model.to_dict())
class GetEmbeddedAppsResponseTO(TO):
    embedded_apps = typed_property('embedded_apps', EmbeddedAppTO, True)
class GetEmbeddedAppsRequestTO(TO):
    type = unicode_property('type', default=None)  # optional
class GetEmbeddedAppResponseTO(EmbeddedAppTO):
    pass
class GetEmbeddedAppRequestTO(TO):
    name = unicode_property('name')
class UpdateEmbeddedAppRequestTO(EmbeddedAppTO):
    pass
class UpdateEmbeddedAppResponseTO(TO):
    pass
class UpdateEmbeddedAppsRequestTO(GetEmbeddedAppsResponseTO):
    pass
class UpdateEmbeddedAppsResponseTO(TO):
    pass
|
|
"""
Support for LG TV running on NetCast 3 or 4.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.lg_netcast/
"""
from datetime import timedelta
import logging
from requests import RequestException
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, PLATFORM_SCHEMA,
SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP,
SUPPORT_SELECT_SOURCE, SUPPORT_PLAY, MEDIA_TYPE_CHANNEL, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_ACCESS_TOKEN,
STATE_OFF, STATE_PLAYING, STATE_PAUSED, STATE_UNKNOWN)
import homeassistant.util as util
# pylgnetcast is fetched straight from GitHub, pinned to v0.2.0.
REQUIREMENTS = ['https://github.com/wokar/pylgnetcast/archive/'
                'v0.2.0.zip#pylgnetcast==0.2.0']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'LG TV Remote'
# Throttle bounds for update(): at most one scan per 10 s, forced scans
# no closer than 1 s apart.
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
# Feature bitmask advertised via supported_media_commands.
SUPPORT_LGTV = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \
               SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | \
               SUPPORT_NEXT_TRACK | SUPPORT_TURN_OFF | \
               SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Required(CONF_HOST): cv.string,
    # Access token is limited to 6 characters by the schema.
    vol.Optional(CONF_ACCESS_TOKEN, default=None):
        vol.All(cv.string, vol.Length(max=6)),
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the LG TV platform."""
    # Local import: pylgnetcast comes from REQUIREMENTS, so it is
    # presumably only importable once platform setup runs — TODO confirm.
    from pylgnetcast import LgNetCastClient
    client = LgNetCastClient(config.get(CONF_HOST),
                             config.get(CONF_ACCESS_TOKEN))
    add_devices([LgTVDevice(client, config[CONF_NAME])])
class LgTVDevice(MediaPlayerDevice):
    """Representation of a LG TV."""
    def __init__(self, client, name):
        """Initialize the LG TV device."""
        self._client = client
        self._name = name
        self._muted = False
        # Assume that the TV is in Play mode
        self._playing = True
        self._volume = 0
        self._channel_name = ''
        self._program_name = ''
        self._state = STATE_UNKNOWN
        self._sources = {}
        self._source_names = []
        self.update()
    def send_command(self, command):
        """Send remote control commands to the TV.

        Communication failures mark the device off instead of raising, so
        a powered-down TV does not break the platform.
        """
        from pylgnetcast import LgNetCastError
        try:
            with self._client as client:
                client.send_command(command)
        except (LgNetCastError, RequestException):
            self._state = STATE_OFF
    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    def update(self):
        """Retrieve the latest data from the LG TV.

        Queries volume, current channel and the channel list; any
        communication error flips the state to off.
        """
        from pylgnetcast import LgNetCastError
        try:
            with self._client as client:
                self._state = STATE_PLAYING
                volume_info = client.query_data('volume_info')
                if volume_info:
                    volume_info = volume_info[0]
                    self._volume = float(volume_info.find('level').text)
                    self._muted = volume_info.find('mute').text == 'true'
                channel_info = client.query_data('cur_channel')
                if channel_info:
                    channel_info = channel_info[0]
                    self._channel_name = channel_info.find('chname').text
                    self._program_name = channel_info.find('progName').text
                channel_list = client.query_data('channel_list')
                if channel_list:
                    channel_names = []
                    for channel in channel_list:
                        channel_name = channel.find('chname')
                        if channel_name is not None:
                            channel_names.append(str(channel_name.text))
                    self._sources = dict(zip(channel_names, channel_list))
                    # sort source names by the major channel number
                    source_tuples = [(k, self._sources[k].find('major').text)
                                     for k in self._sources.keys()]
                    sorted_sources = sorted(
                        source_tuples, key=lambda channel: int(channel[1]))
                    self._source_names = [n for n, k in sorted_sources]
        except (LgNetCastError, RequestException):
            self._state = STATE_OFF
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def state(self):
        """Return the state of the device."""
        return self._state
    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self._muted
    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        # The TV reports 0..100; Home Assistant expects 0..1.
        return self._volume / 100.0
    @property
    def source(self):
        """Return the current input source."""
        return self._channel_name
    @property
    def source_list(self):
        """List of available input sources."""
        return self._source_names
    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return MEDIA_TYPE_CHANNEL
    @property
    def media_channel(self):
        """Channel currently playing."""
        return self._channel_name
    @property
    def media_title(self):
        """Title of current playing media."""
        return self._program_name
    @property
    def supported_media_commands(self):
        """Flag of media commands that are supported."""
        return SUPPORT_LGTV
    @property
    def media_image_url(self):
        """URL for obtaining a screen capture."""
        return self._client.url + 'data?target=screen_image'
    def turn_off(self):
        """Turn off media player."""
        # Numeric arguments below are remote-control key codes
        # (presumably as defined by pylgnetcast — TODO confirm).
        self.send_command(1)
    def volume_up(self):
        """Volume up the media player."""
        self.send_command(24)
    def volume_down(self):
        """Volume down media player."""
        self.send_command(25)
    def mute_volume(self, mute):
        """Send mute command."""
        # NOTE(review): the 'mute' argument is ignored; the same toggle
        # code is sent regardless — confirm this is intended.
        self.send_command(26)
    def select_source(self, source):
        """Select input source."""
        self._client.change_channel(self._sources[source])
    def media_play_pause(self):
        """Simulate play pause media player."""
        if self._playing:
            self.media_pause()
        else:
            self.media_play()
    def media_play(self):
        """Send play command."""
        self._playing = True
        self._state = STATE_PLAYING
        self.send_command(33)
    def media_pause(self):
        """Send media pause command to media player."""
        self._playing = False
        self._state = STATE_PAUSED
        self.send_command(34)
    def media_next_track(self):
        """Send next track command."""
        self.send_command(36)
    def media_previous_track(self):
        """Send the previous track command."""
        self.send_command(37)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import ctypes
import argparse
from ctypes import sizeof, pointer
import elf
def add_symbols(elff, symbols):
    """Build a new ELF image from *elff* with *symbols* injected.

    Args:
      elff: parsed elf.ELFFile; its buffer is copied, never modified.
      symbols: iterable of (name, addr, size) tuples. A symbol whose addr
        does not fall inside any section (ghost or original) is skipped
        with a warning.

    Returns:
      A new elf.ELFFile with appended .wsymtab/.strtab/.shstrtab sections
      and a relocated section header table at the end of the file.
    """
    #
    # THE PLAN:
    #
    # - Keep the existing file structure but create our own sections.
    # - add our data (symtab + symstrtab + shstrtab)
    # - Add our sections (ghosts + symtabhdr + strtabhdr + shstrtabhdr)
    #   at the end of the file.
    # - Hijack e_shoff and point it to our sections.
    #
    shstrtab = b""
    shdr_t = elff.elf_shdr()
    shdrs = []
    # Add null section (index 0 is reserved in ELF).
    nullhdr = shdr_t()
    shstrtab += b"\x00"
    shdrs.append(nullhdr)
    # Build a ghost section for each segment.
    # We need ghosts to handle binaries with no sections at all, since
    # symbols must reference a section index (st_shndx).
    nbg = 0
    for phdr in elff.phdrs:
        if phdr.p_type != elf.PT_LOAD:
            continue
        shdr = shdr_t()
        shdr.sh_name = len(shstrtab)
        shstrtab += bytes("GHOST%d_%.*x\x00" % (
            nbg, elff.wordsize // 4, phdr.p_vaddr), "utf8")
        shdr.sh_type = elf.SHT_NOBITS
        shdr.sh_flags = elf.SHF_ALLOC
        # Mirror segment permissions onto the ghost section flags.
        if phdr.p_flags & elf.PF_X:
            shdr.sh_flags |= elf.SHF_EXECINSTR
        if phdr.p_flags & elf.PF_W:
            shdr.sh_flags |= elf.SHF_WRITE
        shdr.sh_addr = phdr.p_vaddr
        shdr.sh_offset = phdr.p_offset
        shdr.sh_size = phdr.p_memsz
        shdr.sh_link = 0
        shdr.sh_info = 0
        shdr.sh_addralign = 1  # Probably fine.
        shdr.sh_entsize = 0
        shdrs.append(shdr)
        nbg += 1
    # If there are shdr's in the original binary
    # we try to keep them. We do *not* need to
    # rewrite the original symtab.
    shoffset = len(shdrs)  # original section indices shift by this much
    for shdr in elff.shdrs:
        shdr = shdr.copy()
        try:
            name = bytes(elff.shstr(shdr.sh_name))
        except KeyError:
            name = b"corrupt\x00"
        shdr.sh_name = len(shstrtab)
        shstrtab += name
        # Fix up cross-section references for the shifted indices.
        shdr.sh_link += shoffset
        if shdr.sh_flags & elf.SHF_INFO_LINK:
            shdr.sh_info += shoffset
        shdrs.append(shdr)
    symstrtab = b""
    # Collect symbols:
    sym_t = elff.elf_sym()
    symtab = []
    # Index 0 of a symtab is the reserved null symbol.
    nullsym = sym_t()
    nullsym.st_name = len(symstrtab)
    symstrtab += b"\x00"
    symtab.append(nullsym)
    for name, addr, size in symbols:
        # Find a section containing addr so st_shndx can be set.
        for shndx, shdr in enumerate(shdrs):
            if shdr.sh_addr <= addr < shdr.sh_addr + shdr.sh_size:
                break
        else:
            print("ignored (bad addr): %#x %s" % (addr, name))
            continue
        sym = sym_t()
        sym.st_name = len(symstrtab)
        symstrtab += bytes(name, "utf8") + b"\x00"
        sym.st_value = addr
        sym.st_size = size
        sym.st_info = (1 << 4) | 2  # GLOBAL FUNC
        sym.st_other = 0
        sym.st_shndx = shndx
        symtab.append(sym)
    # Add symtab header; its data will be appended right after the
    # original file contents.
    symtabhdr = shdr_t()
    symtabhdr.sh_name = len(shstrtab)
    shstrtab += b".wsymtab\x00"
    symtabhdr.sh_type = elf.SHT_SYMTAB
    symtabhdr.sh_flags = 0
    symtabhdr.sh_addr = 0
    symtabhdr.sh_offset = len(elff.data)
    symtabhdr.sh_size = len(symtab) * sizeof(sym_t)
    symtabhdr.sh_link = len(shdrs) + 1  # list + [us, STRTAB]
    symtabhdr.sh_info = 0  # ?
    symtabhdr.sh_addralign = 1
    symtabhdr.sh_entsize = sizeof(sym_t)
    shdrs.append(symtabhdr)
    # Add symstrtab (symbol name strings), right after the symtab data.
    symstrtabhdr = shdr_t()
    symstrtabhdr.sh_name = len(shstrtab)
    shstrtab += b".strtab\x00"
    symstrtabhdr.sh_type = elf.SHT_STRTAB
    symstrtabhdr.sh_flags = 0
    symstrtabhdr.sh_addr = 0
    symstrtabhdr.sh_offset = len(elff.data) + symtabhdr.sh_size
    symstrtabhdr.sh_size = len(symstrtab)
    symstrtabhdr.sh_link = 0
    symstrtabhdr.sh_info = 0
    symstrtabhdr.sh_addralign = 1
    symstrtabhdr.sh_entsize = 0
    shdrs.append(symstrtabhdr)
    # Add shstrtab (section name strings). Its own name is appended
    # before sh_size is read, so the table includes it.
    shstrtabhdr = shdr_t()
    shstrtabhdr.sh_name = len(shstrtab)
    shstrtab += b".shstrtab\x00"
    shstrtabhdr.sh_type = elf.SHT_STRTAB
    shstrtabhdr.sh_flags = 0
    shstrtabhdr.sh_addr = 0
    shstrtabhdr.sh_offset = len(elff.data) + symtabhdr.sh_size + symstrtabhdr.sh_size
    shstrtabhdr.sh_size = len(shstrtab)
    shstrtabhdr.sh_link = 0
    shstrtabhdr.sh_info = 0
    shstrtabhdr.sh_addralign = 1
    shstrtabhdr.sh_entsize = 0
    shdrs.append(shstrtabhdr)
    # We have all the elements,
    # build the new file:
    # [original file | symtab | symstrtab | shstrtab | section headers]
    newdata = bytearray(len(elff.data)
                        + symtabhdr.sh_size
                        + symstrtabhdr.sh_size
                        + shstrtabhdr.sh_size
                        + sizeof(shdr_t) * len(shdrs))
    offset = len(elff.data)
    newdata[0:offset] = elff.data
    for sym in symtab:
        newdata[offset:offset+sizeof(sym)] = sym
        offset += sizeof(sym)
    newdata[offset:offset+len(symstrtab)] = symstrtab
    offset += len(symstrtab)
    newdata[offset:offset+len(shstrtab)] = shstrtab
    offset += len(shstrtab)
    shoff = offset
    for shdr in shdrs:
        newdata[offset:offset+sizeof(shdr)] = shdr
        offset += sizeof(shdr)
    newelf = elf.ELFFile(newdata)
    # Don't forget to link everything back to ehdr:
    newelf.ehdr.e_shoff = shoff
    newelf.ehdr.e_shentsize = ctypes.sizeof(shdr_t)
    newelf.ehdr.e_shnum = len(shdrs)
    newelf.ehdr.e_shstrndx = len(shdrs) - 1
    return newelf
class FileParser(object):
    """Base class for symbol-source parsers; opens *path* for text reading."""

    def __init__(self, path):
        # argparse.FileType gives "-" (stdin) support and friendly CLI
        # errors for free — instances are built straight from CLI flags.
        self.file = argparse.FileType("r")(path)

    def log(self, msg, *args, **kwargs):
        """Print *msg* prefixed with the concrete parser class name."""
        prefix = self.__class__.__name__
        print("%s: %s" % (prefix, msg), *args, **kwargs)
class FlatParser(FileParser):
    """Parses a flat symbol map: "<hex addr> <name> [<hex size>]" per line."""

    def get_symbols(self, target, verbose=False):
        """Return (name, addr, size) tuples; *target* is unused here."""
        out = []
        for raw in self.file:
            if raw.startswith("#"):
                continue  # comment line
            fields = raw.split()
            if len(fields) == 2:
                fields.append("0")  # size is optional, defaults to 0
            if len(fields) != 3:
                continue  # malformed line, skip silently
            addr_text, sym_name, size_text = fields
            addr = int(addr_text, 16)
            sym_size = int(size_text, 16)
            if verbose:
                self.log("%15s = %#x,\tsize=%d" % (
                    sym_name, addr, sym_size))
            out.append((sym_name, addr, sym_size))
        return out
class NMParser(FileParser):
    """Parses `nm` output: "<hex addr> <type> <name>"; sizes are unknown."""

    def get_symbols(self, target, verbose=False):
        """Return (name, addr, 0) tuples; *target* is unused here."""
        result = []
        for raw in self.file:
            if raw.startswith("#"):
                continue
            fields = raw.split()
            if len(fields) != 3:
                # Undefined symbols (2 fields) and junk lines are skipped.
                continue
            addr = int(fields[0], 16)
            sym_name = fields[2]
            if verbose:
                self.log("%15s = %#x,\tsize=%d" % (
                    sym_name, addr, 0))
            result.append((sym_name, addr, 0))
        return result
class IDAParser(FileParser):
    """Parses IDA Pro .map files (segment:offset style addresses)."""

    def get_symbols(self, target, verbose=False):
        """Return (name, addr, 0) tuples resolved against *target* (ELFFile)."""
        # OK, IDA is weird, it uses section-relative addresses.
        # UNLESS there are no sections, then it uses segments.
        # No way to know... Lets guess.
        # Skip forward to the section table header line.
        for line in self.file:
            if line.split() == ["Start", "Length", "Name", "Class"]:
                break
        sections = []
        for line in self.file:
            splited = line.split()
            if len(splited) != 4:
                break
            start_, _, _, name = splited
            start, _ = start_.split(":")
            name = bytes(name, "utf8") + b"\x00"
            sections.append([int(start, 16), name])
        # Ok, this is where we guess, kinda.
        # Lets check if all those sections exist in the target,
        # otherwise we'll consider they are segments.
        i = 0
        for shndx, shdr in enumerate(target.shdrs):
            if i == len(sections):
                break
            if target.shstr(shdr.sh_name) == sections[i][1]:
                # Matched in file order; remember the ELF section index.
                sections[i][1] = shndx
                i += 1
        if i == len(sections):
            # Every IDA "section" matched an ELF section by name:
            # map segment number -> section load address.
            translations = {}
            for i, shndx in sections:
                translations[i] = target.shdrs[shndx].sh_addr
        else:
            self.log("Couldnt match %s as a section. Assuming segments." % (sections[i], ))
            translations = {}
            for i, _ in sections:
                # NOTE(review): assumes IDA segment number i maps to program
                # header index i+1 — TODO confirm against real IDA output.
                translations[i] = target.phdrs[i+1].p_vaddr
        # OK, done guessing. Skip forward to the symbol table.
        for line in self.file:
            if line.split() == ["Address", "Publics", "by", "Value"]:
                break
        next(self.file)  # burn empty line.
        symbols = []
        for line in self.file:
            splited = line.split()
            if len(splited) != 2:
                break
            segment_offset, name = splited
            segment, offset = segment_offset.split(":")
            segment = int(segment, 16)
            offset = int(offset, 16)
            addr = translations[segment] + offset
            if verbose:
                self.log("%15s = %#x:%x + %#x = %#x,\tsize=%d" % (
                    name, segment, translations[segment], offset, addr, 0))
            symbols.append((name, addr, 0))
        return symbols
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("input", type=argparse.FileType("rb"))
    parser.add_argument("output", type=argparse.FileType("wb"))
    parser.add_argument("-v", "--verbose", action="store_true")
    parser.set_defaults(symbols=[])
    # Each -f/-i/-n flag constructs a parser object from the given path;
    # repeated flags accumulate into args.symbols.
    parser.add_argument("-f", "--flat", help="flat map format. (addr, name, [size])",
                        type=FlatParser, dest="symbols", action="append")
    parser.add_argument("-i", "--ida", help="IDA .map format.",
                        type=IDAParser, dest="symbols", action="append")
    parser.add_argument("-n", "--nm", help="nm format.",
                        type=NMParser, dest="symbols", action="append")
    args = parser.parse_args()
    elff = elf.ELFFile(bytearray(args.input.read()))
    symbols = []
    # Note: `parser` is rebound here from ArgumentParser to the file parsers.
    for parser in args.symbols:
        symbols += parser.get_symbols(elff, verbose=args.verbose)
    if not symbols:
        print("Warning: No symbols are being added. "
              "I'll still try though, even if its pointless.")
    newelf = add_symbols(elff, symbols)
    args.output.write(newelf.data)
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the static list of units for SC2. Generated by bin/gen_data.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
# pylint: disable=invalid-name
class Neutral(enum.IntEnum):
  """Neutral units."""
  # Values are raw unit type ids from the SC2 API. Generated by
  # bin/gen_data.py (see module docstring) — do not edit by hand.
  BattleStationMineralField = 886
  BattleStationMineralField750 = 887
  CarrionBird = 322
  CleaningBot = 612
  CollapsibleRockTower = 609
  CollapsibleRockTowerDebris = 490
  CollapsibleRockTowerDebrisRampLeft = 518
  CollapsibleRockTowerDebrisRampRight = 517
  CollapsibleRockTowerDiagonal = 588
  CollapsibleRockTowerPushUnit = 561
  CollapsibleRockTowerPushUnitRampLeft = 564
  CollapsibleRockTowerPushUnitRampRight = 563
  CollapsibleRockTowerRampLeft = 664
  CollapsibleRockTowerRampRight = 663
  CollapsibleTerranTower = 610
  CollapsibleTerranTowerDebris = 485
  CollapsibleTerranTowerDiagonal = 589
  CollapsibleTerranTowerPushUnit = 562
  CollapsibleTerranTowerPushUnitRampLeft = 559
  CollapsibleTerranTowerPushUnitRampRight = 560
  CollapsibleTerranTowerRampLeft = 590
  CollapsibleTerranTowerRampRight = 591
  Crabeetle = 662
  Debris2x2NonConjoined = 475
  DebrisRampLeft = 486
  DebrisRampRight = 487
  DestructibleBillboardTall = 350
  DestructibleCityDebris4x4 = 628
  DestructibleCityDebris6x6 = 629
  DestructibleCityDebrisHugeDiagonalBLUR = 630
  DestructibleDebris4x4 = 364
  DestructibleDebris6x6 = 365
  DestructibleDebrisRampDiagonalHugeBLUR = 377
  DestructibleDebrisRampDiagonalHugeULBR = 376
  DestructibleIce4x4 = 648
  DestructibleIce6x6 = 649
  DestructibleIceDiagonalHugeBLUR = 651
  DestructibleRampDiagonalHugeBLUR = 373
  DestructibleRampDiagonalHugeULBR = 372
  DestructibleRock6x6 = 371
  DestructibleRockEx14x4 = 638
  DestructibleRockEx16x6 = 639
  DestructibleRockEx1DiagonalHugeBLUR = 641
  DestructibleRockEx1DiagonalHugeULBR = 640
  DestructibleRockEx1HorizontalHuge = 643
  DestructibleRockEx1VerticalHuge = 642
  Dog = 336
  InhibitorZoneMedium = 1958
  InhibitorZoneSmall = 1957
  KarakFemale = 324
  LabBot = 661
  LabMineralField = 665
  LabMineralField750 = 666
  Lyote = 321
  MineralField = 341
  MineralField450 = 1961
  MineralField750 = 483
  ProtossVespeneGeyser = 608
  PurifierMineralField = 884
  PurifierMineralField750 = 885
  PurifierRichMineralField = 796
  PurifierRichMineralField750 = 797
  PurifierVespeneGeyser = 880
  ReptileCrate = 877
  RichMineralField = 146
  RichMineralField750 = 147
  RichVespeneGeyser = 344
  Scantipede = 335
  ShakurasVespeneGeyser = 881
  SpacePlatformGeyser = 343
  UnbuildableBricksDestructible = 473
  UnbuildablePlatesDestructible = 474
  UnbuildableRocksDestructible = 472
  UtilityBot = 330
  VespeneGeyser = 342
  XelNagaDestructibleBlocker8NE = 1904
  XelNagaDestructibleBlocker8SW = 1908
  XelNagaTower = 149
class Protoss(enum.IntEnum):
  """Protoss units."""
  # Values are raw unit type ids from the SC2 API. Generated by
  # bin/gen_data.py (see module docstring) — do not edit by hand.
  Adept = 311
  AdeptPhaseShift = 801
  Archon = 141
  Assimilator = 61
  AssimilatorRich = 1955
  Carrier = 79
  Colossus = 4
  CyberneticsCore = 72
  DarkShrine = 69
  DarkTemplar = 76
  Disruptor = 694
  DisruptorPhased = 733
  FleetBeacon = 64
  ForceField = 135
  Forge = 63
  Gateway = 62
  HighTemplar = 75
  Immortal = 83
  Interceptor = 85
  Mothership = 10
  MothershipCore = 488
  Nexus = 59
  Observer = 82
  ObserverSurveillanceMode = 1911
  Oracle = 495
  Phoenix = 78
  PhotonCannon = 66
  Probe = 84
  Pylon = 60
  PylonOvercharged = 894
  RoboticsBay = 70
  RoboticsFacility = 71
  Sentry = 77
  ShieldBattery = 1910
  Stalker = 74
  Stargate = 67
  StasisTrap = 732
  Tempest = 496
  TemplarArchive = 68
  TwilightCouncil = 65
  VoidRay = 80
  WarpGate = 133
  WarpPrism = 81
  WarpPrismPhasing = 136
  Zealot = 73
class Terran(enum.IntEnum):
  """Terran units."""
  # Values are raw unit type ids from the SC2 API. Generated by
  # bin/gen_data.py (see module docstring) — do not edit by hand.
  Armory = 29
  AutoTurret = 31
  Banshee = 55
  Barracks = 21
  BarracksFlying = 46
  BarracksReactor = 38
  BarracksTechLab = 37
  Battlecruiser = 57
  Bunker = 24
  CommandCenter = 18
  CommandCenterFlying = 36
  Cyclone = 692
  EngineeringBay = 22
  Factory = 27
  FactoryFlying = 43
  FactoryReactor = 40
  FactoryTechLab = 39
  FusionCore = 30
  Ghost = 50
  GhostAcademy = 26
  GhostAlternate = 144
  GhostNova = 145
  Hellion = 53
  Hellbat = 484
  KD8Charge = 830
  Liberator = 689
  LiberatorAG = 734
  MULE = 268
  Marauder = 51
  Marine = 48
  Medivac = 54
  MissileTurret = 23
  Nuke = 58
  OrbitalCommand = 132
  OrbitalCommandFlying = 134
  PlanetaryFortress = 130
  PointDefenseDrone = 11
  Raven = 56
  Reactor = 6
  Reaper = 49
  Refinery = 20
  RefineryRich = 1960
  RepairDrone = 1913
  SCV = 45
  SensorTower = 25
  SiegeTank = 33
  SiegeTankSieged = 32
  Starport = 28
  StarportFlying = 44
  StarportReactor = 42
  StarportTechLab = 41
  SupplyDepot = 19
  SupplyDepotLowered = 47
  TechLab = 5
  Thor = 52
  ThorHighImpactMode = 691
  VikingAssault = 34
  VikingFighter = 35
  WidowMine = 498
  WidowMineBurrowed = 500
class Zerg(enum.IntEnum):
  """Zerg units."""
  # Values are raw unit type ids from the SC2 API. Generated by
  # bin/gen_data.py (see module docstring) — do not edit by hand.
  Baneling = 9
  BanelingBurrowed = 115
  BanelingCocoon = 8
  BanelingNest = 96
  BroodLord = 114
  BroodLordCocoon = 113
  Broodling = 289
  BroodlingEscort = 143
  Changeling = 12
  ChangelingMarine = 15
  ChangelingMarineShield = 14
  ChangelingZealot = 13
  ChangelingZergling = 17
  ChangelingZerglingWings = 16
  Cocoon = 103
  Corruptor = 112
  CreepTumor = 87
  CreepTumorBurrowed = 137
  CreepTumorQueen = 138
  Drone = 104
  DroneBurrowed = 116
  EvolutionChamber = 90
  Extractor = 88
  ExtractorRich = 1956
  GreaterSpire = 102
  Hatchery = 86
  Hive = 101
  Hydralisk = 107
  HydraliskBurrowed = 117
  HydraliskDen = 91
  InfestationPit = 94
  InfestedTerran = 7
  InfestedTerranBurrowed = 120
  InfestedTerranCocoon = 150
  Infestor = 111
  InfestorBurrowed = 127
  Lair = 100
  Larva = 151
  Locust = 489
  LocustFlying = 693
  Lurker = 502
  LurkerBurrowed = 503
  LurkerDen = 504
  LurkerCocoon = 501
  Mutalisk = 108
  NydusCanal = 142
  NydusNetwork = 95
  Overlord = 106
  OverlordTransport = 893
  OverlordTransportCocoon = 892
  Overseer = 129
  OverseerCocoon = 128
  OverseerOversightMode = 1912
  ParasiticBombDummy = 824
  Queen = 126
  QueenBurrowed = 125
  Ravager = 688
  RavagerBurrowed = 690
  RavagerCocoon = 687
  Roach = 110
  RoachBurrowed = 118
  RoachWarren = 97
  SpawningPool = 89
  SpineCrawler = 98
  SpineCrawlerUprooted = 139
  Spire = 92
  SporeCrawler = 99
  SporeCrawlerUprooted = 140
  SwarmHost = 494
  SwarmHostBurrowed = 493
  Ultralisk = 109
  UltraliskBurrowed = 131
  UltraliskCavern = 93
  Viper = 499
  Zergling = 105
  ZerglingBurrowed = 119
def get_unit_type(unit_id):
  """Maps a raw unit id to its race-specific enum member.

  Args:
    unit_id: Integer unit type id as reported by the SC2 API.

  Returns:
    The matching Neutral/Protoss/Terran/Zerg enum member, or None when the
    id is not a known unit type.
  """
  for race in (Neutral, Protoss, Terran, Zerg):
    try:
      return race(unit_id)
    except ValueError:
      pass  # Wrong race.
  return None  # Explicit (was an implicit fall-through): unknown unit id.
|
|
#!/usr/bin/python2.4
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable-msg=W0231
"""This module contains implementations for various rules."""
# Suppress pylint invalid import order
# pylint: disable-msg=C6203
# Suppress pylint invalid use of super on old style class
# pylint: disable-msg=E1002
import datetime
import logging
import time
import appenginepatcher
from django.utils import translation
from google.appengine.ext import db
from core import errors
from core import models
from core import notifications
from core import request_cache
from core import rules
from core import service_factory
from core import utils
_ = translation.ugettext
STATUS = utils.RegistrationStatus
class MaxNumberRegisteredBy(rules.RuleRegisterResource):
  """Abstract class to limit the number of people who can be registered.

  Subclasses choose the counting granularity (whole activity, per access
  point, ...) by picking the memcache key they pass to _Evaluate and
  _ProcessOutcome.

  Attributes:
    max_people: Maximum number of people who can registered.
  """

  # Key to keep track of how many students are currently enrolled.
  _KEY_PREFIX = '_'

  def __init__(self, max_people, *args, **kargs):
    super(MaxNumberRegisteredBy, self).__init__(*args, **kargs)
    self.max_people = max_people

  def _NormalizeKey(self, key):
    # Fall back to the class-wide prefix when no per-resource key is given.
    if not key:
      key = self._KEY_PREFIX
    # we normalize the key to a string
    return str(key)

  def _Evaluate(self, unused_initial_state, target_state, key=None):
    """Proxy around RuleRegister.Evaluate to deal with memcache state/keys.

    Args:
      unused_initial_state: See RuleRegister.Evaluate
      target_state: See RuleRegister.Evaluate
      key: The resource key to be used for maintaining the state in mem cache
        as far as incrementing/decrementing is concerned.

    Returns:
      Returns value that RuleRegister.Evaluate() should return.
    """
    key = self._NormalizeKey(key)
    resource_remaining = None
    if target_state == utils.RegistrationStatus.ENROLLED:
      # Optimistically claim a slot; the outcome handlers below undo it
      # when the registration does not go through.
      num_students = self._Incr(key)
      if num_students is None:
        # Cannot look up the value, we waitlist.
        logging.error('Could not increment key, val, namespace [%s,%s,%s]', key,
                      self._Get(key, 0), self.namespace)
        if self.online:
          value = utils.RegistrationStatus.WAITLISTED
          resource_remaining = 0  # Conservative estimate.
        else:  # in offline mode we fail and retry again.
          assert False, 'Could not access memcache'
      elif num_students <= self.max_people:
        value = utils.RegistrationStatus.ENROLLED
        resource_remaining = self.max_people - num_students
      else:
        # Over capacity; resource_remaining may go negative here.
        value = utils.RegistrationStatus.WAITLISTED
        resource_remaining = self.max_people - num_students
        if self.offline:
          self._Decr(key)
        # In online mode we don't decrement the counter because we want this
        # rule to stay in waiting state for all further requests.
        # Reasoning is that we don't want another user get ENROLLED after one
        # got WAITING for fairness reasons (since the WAITING one may get denied
        # afterwards with back-end processing)
    else:
      # This rule only tries to limit number of enrollments. Anything else is OK
      value = target_state
      resources_used = self._Get(key)
      if resources_used is None:
        resources_used = self.max_people
        # Can t lookup resources used, we take a conservative approach
      resource_remaining = self.max_people - resources_used
    # We build the limiting resource key
    contextualized_key = self.key + key
    return {'status': value, 'rule_tags': [contextualized_key],
            'resource_remaining': resource_remaining}

  def _ProcessOnlineOutcome(self, eval_state, final_state, key):
    # Reconcile the optimistic increment from _Evaluate with the final
    # decision made by the rule engine.
    if eval_state == utils.RegistrationStatus.UNREGISTERED:
      if final_state == utils.RegistrationStatus.UNREGISTERED:
        # Need to decrement the counter. This _ProcessOutcome for UNREGISTERED
        # is only being called by the offline process on the online rules.
        # This is the only case where the offline process impacts the online
        # context. Note that the online process never notifies offline context.
        self._Decr(key)
    else:
      assert eval_state in [utils.RegistrationStatus.WAITLISTED,
                            utils.RegistrationStatus.ENROLLED]
      if final_state is None:
        # We incremented and dont need to.
        self._Decr(key)

  def _ProcessOfflineOutcome(self, eval_state, final_state, key):
    # Offline counters track confirmed enrollments only, so the
    # adjustments differ from the online (optimistic) path.
    if eval_state == utils.RegistrationStatus.UNREGISTERED:
      if final_state == utils.RegistrationStatus.UNREGISTERED:
        # we get notified but we never processed this request since this rule
        # does not act on unregister actions.
        self._Decr(key)
    elif eval_state == utils.RegistrationStatus.ENROLLED:
      if final_state != utils.RegistrationStatus.ENROLLED:
        # We incremented and dont need to.
        self._Decr(key)
    else:
      assert eval_state == utils.RegistrationStatus.WAITLISTED
      if final_state == utils.RegistrationStatus.ENROLLED:
        # We did not increment and need to.
        self._Incr(key)

  def _ProcessOutcome(self, eval_state, final_state, key=None):
    key = self._NormalizeKey(key)
    if self.offline:
      self._ProcessOfflineOutcome(eval_state, final_state, key)
    else:
      self._ProcessOnlineOutcome(eval_state, final_state, key)

  def ProcessOutcome(self, eval_state, final_state):
    self._ProcessOutcome(eval_state, final_state)
class MaxNumberRegisteredByActivity(MaxNumberRegisteredBy):
  """Limits the number of people who can be registered for an activity.

  Attributes:
    max_people: Int. Maximum number of people who can register for the activity.
  """

  def __init__(self, max_people, *args, **kargs):
    super(MaxNumberRegisteredByActivity, self).__init__(max_people, *args,
                                                        **kargs)

  def Evaluate(self, initial_state, target_state):
    # One shared counter for the whole activity, hence key=None.
    return MaxNumberRegisteredBy._Evaluate(self, initial_state, target_state,
                                           None)

  # Suppress pylint unused argument for overriden method
  # pylint: disable-msg=W0613
  @classmethod
  def TagsToReprocessOnChange(cls, rule_config, program_or_activity=None):
    """Overrides parent method."""
    return [rule_config.key+cls._KEY_PREFIX]

  def _BuildContext(self):
    """Overrides parent method."""
    # Rebuild the enrollment count from the datastore.
    query = models.UserRegistration.all()
    query.filter('activity = ', self.eval_context.activity)
    _UpdateQueryMode(query, self.offline)
    num_students = 0
    for reg in query:
      if _RegistrationNeedsAccounting(reg, self.offline):
        num_students += 1
    return {self._KEY_PREFIX: num_students}

  @classmethod
  def GetDescription(cls):
    return _('Limited slots for activity.')
class MaxNumberRegisteredByAccessPoint(MaxNumberRegisteredBy):
  """Limits the number of people who can be registered for an access point.

  Due to datastore limitations, an access point cannot accept more than 1000
  people at this time.

  Attributes:
    max_people: Maximum number of people for that access point.
    access_point_key: List of AccessPoint keys to be used for this rule.
  """

  def __init__(self, max_people, access_point_keys, *args, **kargs):
    super(MaxNumberRegisteredByAccessPoint, self).__init__(max_people, *args,
                                                           **kargs)
    self.access_point_keys = access_point_keys

  def _BuildContext(self):
    """Overrides parent method."""
    return self._BuildContextFromAccessPoints(self.access_point_keys)

  @classmethod
  def CanProcessMultipleSchedules(cls):
    # Counting is per schedule, so each schedule is evaluated on its own.
    return False

  def _BuildContextFromAccessPoints(self, access_point_keys):
    """Builds a context from a list of access point keys.

    Args:
      access_point_keys: List of access point keys.

    Returns:
      A dictionary of key/values representing the context.
    """
    query = models.UserRegistration.all()
    query.filter('activity = ', self.eval_context.activity)
    _UpdateQueryMode(query, self.offline)
    keys = {}
    for reg in query:
      if _RegistrationNeedsAccounting(reg, self.offline):
        for schedule_key, ap_key in zip(reg.schedule_list,
                                        reg.access_point_list):
          if ap_key in access_point_keys:
            sched_key = str(schedule_key)
            # This user registration is relevant to this rule
            keys[sched_key] = keys.get(sched_key, 0) + 1
    return keys

  def Evaluate(self, initial_state, target_state):
    """Overrides parent method."""
    return self._EvaluateAccessPoints(initial_state, target_state,
                                      self.access_point_keys)

  def _EvaluateAccessPoints(self, initial_state, target_state,
                            access_point_keys):
    """Evaluates the rule based on the given list of access point keys."""
    # TODO(user): append .value once we have the right evalcontext
    # We take the first schedule/access point because this rule can not process
    # multiple schedules. As such, its context is populated with only one entry
    # at the rules level.
    access_point_key = self.eval_context.access_point_list[0].key()
    schedule_key = self.eval_context.schedule_list[0].key()
    if access_point_key in access_point_keys:
      # This rule applies to this schedule
      return self._Evaluate(initial_state, target_state,
                            schedule_key)
    else:
      # Not one of ours: pass the transition through untouched.
      return {'status': target_state, 'resource_remaining': None,
              'rule_tags': []}

  def _ProcessOutcomeAccessPoints(self, eval_state, final_state,
                                  access_point_keys):
    # Forward the outcome for every schedule that this rule governs.
    for schedule, access_point in zip(self.eval_context.schedule_list,
                                      self.eval_context.access_point_list):
      if access_point.key() in access_point_keys:
        self._ProcessOutcome(eval_state, final_state, schedule.key())

  def ProcessOutcome(self, eval_state, final_state):
    return self._ProcessOutcomeAccessPoints(eval_state, final_state,
                                            self.access_point_keys)

  @classmethod
  def GetDescription(cls):
    return _('Limited slots for attending location')
class MaxNumberRegisteredByAccessPointTag(MaxNumberRegisteredByAccessPoint):
  """Limits the number of people who can be registered for an access point tag.

  Every access point can have a tag associated with it. For example, both
  'Lincoln Center' and 'War Room' access points can have NYC tag.
  This rule can enforce that no more than 20 people can register with NYC.

  Attributes:
    max_people: Maximum number of people for that access point tag.
    access_point_tags: A string list of tags to be used for this rule.
  """

  def __init__(self, max_people, access_point_tags, *args, **kargs):
    # Resolve tags to concrete access point keys up front, then behave
    # exactly like the per-access-point rule.
    ap_keys = kargs['eval_context'].activity.GetAccessPoints()
    relevant_keys = _GetRelevantAccessPointKeys(ap_keys, access_point_tags)
    super(MaxNumberRegisteredByAccessPointTag, self).__init__(max_people,
                                                              relevant_keys,
                                                              *args, **kargs)

  @classmethod
  def GetDescription(cls):
    return _('Limited slots for attending location type')
def _GetRelevantAccessPointKeys(access_point_keys, access_point_tags):
  """Filters access point keys down to those matching all given tags.

  Args:
    access_point_keys: A list of AccessPoint keys.
    access_point_tags: A string list of access point tags.

  Returns:
    The subset of access_point_keys (as a set) whose corresponding access
    points have ALL the given access_point_tags.
  """
  # Resolve the tags to access points, then intersect their keys with the
  # caller-provided candidates.
  tagged_aps = _GetAccessPointsWithTags(access_point_tags)
  tagged_keys = set(ap.key() for ap in tagged_aps)
  return tagged_keys.intersection(access_point_keys)
class TimeFrameRegistrationByActivity(rules.RuleRegister):
  """Restricts registration for an activity to a time window.

  Enrollment attempts outside the [start_time, end_time] window are
  rejected or waitlisted by _CanRegisterTimeWindows; anyone waitlisted
  before the window opens can be enrolled automatically once it does,
  provided the other rules agree.

  Attributes:
    start_time: Datetime at which people can start registering.
    end_time: Datetime after which people cannot register.
  """

  def __init__(self, start_time, end_time, *args, **kargs):
    super(TimeFrameRegistrationByActivity, self).__init__(*args, **kargs)
    # Window bounds arrive as epoch seconds; keep them as datetimes.
    self.start_time = datetime.datetime.fromtimestamp(start_time)
    self.end_time = datetime.datetime.fromtimestamp(end_time)

  def Evaluate(self, initial_state, target_state):
    # Only enrollment attempts are window-checked; every other transition
    # passes through untouched.
    if target_state != utils.RegistrationStatus.ENROLLED:
      return {'status': target_state}
    status = _CanRegisterTimeWindows(initial_state,
                                     self.eval_context.queue_time,
                                     self.start_time, self.end_time)
    return {'status': status}

  @classmethod
  def GetDescription(cls):
    return _('Registration window.')
class TimeFrameRegistrationByAccessPointTag(rules.RuleRegister):
  """Limits the time frame for registration based on access point tag.

  This rule limits the time frame in which people can register for a
  particular activity / access point tag. People cannot register after the time
  frame has elapsed.

  If someone registers before the time frame, that person will be placed on the
  waiting list. Once the time frame arrives the person will be automatically
  enrolled as long as other rules are satisfied.

  Attributes:
    start_time: Datetime at which people can start registering.
    end_time: Datetime after which people cannot register any more.
    access_point_tags: List of access point tags for this rule.
  """

  def __init__(self, start_time, end_time, access_point_tags=None,
               *args, **kargs):
    super(TimeFrameRegistrationByAccessPointTag, self).__init__(*args, **kargs)
    # Window bounds arrive as epoch seconds; keep them as datetimes.
    self.start_time = datetime.datetime.fromtimestamp(start_time)
    self.end_time = datetime.datetime.fromtimestamp(end_time)
    self.access_point_tags = access_point_tags

  def Evaluate(self, initial_state, target_state):
    """Overrides parent method."""
    if target_state == utils.RegistrationStatus.ENROLLED:
      # Only window-check enrollments whose access points carry our tags.
      ap_keys = [ap.key() for ap in self.eval_context.access_point_list]
      aps = _GetRelevantAccessPointKeys(ap_keys, self.access_point_tags)
      if aps:
        # this rule applies
        return {'status': _CanRegisterTimeWindows(initial_state,
                                                  self.eval_context.queue_time,
                                                  self.start_time,
                                                  self.end_time)}
    return {'status': target_state}

  @classmethod
  def GetDescription(cls):
    return _('Registration window for attending location.')
class TimeCancelByActivity(rules.RuleRegister):
  """Blocks un-registration within a cutoff window before an activity.

  Attributes:
    time_to_activity: Seconds before the activity start after which users
      may no longer unregister.
  """

  def __init__(self, time_to_activity, *args, **kargs):
    super(TimeCancelByActivity, self).__init__(*args, **kargs)
    self.time_to_activity = time_to_activity

  def Evaluate(self, initial_state, target_state):
    """Overrides parent method."""
    is_cancel = (target_state == utils.RegistrationStatus.UNREGISTERED
                 and initial_state == utils.RegistrationStatus.ENROLLED)
    if not is_cancel:
      # Only ENROLLED -> UNREGISTERED transitions are constrained.
      return {'status': target_state}
    # mktime on local-time tuples is fine as long as every datetime
    # involved lives in the same timezone.
    activity_start = time.mktime(
        self.eval_context.activity.start_time.timetuple())
    deadline = activity_start - self.time_to_activity
    queued_at = time.mktime(self.eval_context.queue_time.timetuple())
    if queued_at < deadline:
      return {'status': utils.RegistrationStatus.UNREGISTERED}
    # Too late to cancel: keep the user enrolled.
    return {'status': initial_state}

  @classmethod
  def GetDescription(cls):
    return _('Unregister deadline.')
class ManagerApproval(rules.RuleRegister):
"""Enforces students to require manager approval before attending a course."""
def __init__(self, *args, **kargs):
super(ManagerApproval, self).__init__(*args, **kargs)
self._check_and_create_approval = False
def _GetRuleTag(self):
return '%s_%s_%s' % (self.key, self.eval_context.activity.key(),
self.eval_context.user.appengine_user)
def _GetUserManager(self):
"""Gets the manager users.User object of the registering user."""
if not appenginepatcher.on_production_server:
# In dev mode user is her own manager. Change it to another user to
# test. Else registration will go preapproved. No workflow.
return self.eval_context.user.appengine_user
if not hasattr(self, '_manager'):
student_email = self.eval_context.user.appengine_user.email()
user_service = service_factory.GetUserInfoService()
manager_info = user_service.GetManagerInfo(student_email)
if manager_info is None:
self._manager = None
else:
self._manager = utils.GetAppEngineUser(manager_info.primary_email)
return self._manager
def _IsPreApproved(self):
try:
# Check if manager is trying to enroll the user through batch enrollment.
return self._GetUserManager() == self.eval_context.creator.appengine_user
# Suppress pylint catch Exception
# pylint: disable-msg=W0703
except errors.ServiceCriticalError, exception:
logging.error('[%s] %s', type(exception), exception)
assert self.online # We dont fail online, just assume, not pre approved.
return False
def _IsPreDeclined(self):
try:
return self._GetUserManager() is None # Dont know person or her manager.
# Suppress pylint catch Exception
# pylint: disable-msg=W0703
except errors.ServiceCriticalError, exception:
if not self.online:
# We dont fail online, just assume, not disapproved.
raise exception
return False
def _GetApprovalKey(self):
"""Key to be used on the approval object."""
return db.Key.from_path(models.ManagerApproval.kind(),
self._GetRuleTag())
def _GetUsableApproval(self):
approval_key = self._GetApprovalKey()
approval = request_cache.GetEntityFromKey(approval_key)
if approval is not None:
time_diff = abs(approval.queue_time - self.eval_context.queue_time)
allow_delta = datetime.timedelta(seconds=1)
if not approval.approved and time_diff >= allow_delta:
# Allows user to re-ask approval if denied previously.
approval = None # Dont use the approval object.
return approval
  def _CheckAndInitiateApprovalProcess(self):
    """Initiates approval process if necessary.

    When no usable approval exists, emails the manager an approval request
    (cc'ing the candidate) and writes a pending ManagerApproval entity keyed
    by the rule tag.
    """
    approval = self._GetUsableApproval()
    if approval is None:
      # Send email to manager to approve user request.
      # NOTE(review): dummy_registration is only used to render the mail
      # template; it is never stored.
      dummy_registration = models.UserRegistration(
          eval_context=self.eval_context,
          status=utils.RegistrationStatus.WAITLISTED,
          confirmed=utils.RegistrationConfirm.PROCESSED,
          active=utils.RegistrationActive.ACTIVE)
      notifications.SendMail(
          dummy_registration,
          notifications.NotificationType.MANAGER_APPROVAL_REQUEST,
          to=self._GetUserManager().email(),
          cc=self.eval_context.user.appengine_user.email(),
          extra_context={'approval_key': str(self._GetApprovalKey())})
      # Write an approval entity to datastore.
      approval_entity = models.ManagerApproval(
          key_name=self._GetRuleTag(),
          candidate=self.eval_context.user.appengine_user,
          manager=self._GetUserManager(),
          activity=self.eval_context.activity.key(),
          program=self.eval_context.program.key(),
          nominator=self.eval_context.creator.appengine_user,
          approved=False,
          manager_decision=False,
          queue_time=self.eval_context.queue_time,
      )
      approval_entity.put()
  def Evaluate(self, unused_initial_state, target_state):
    """Overrides parent method.

    Gates the ENROLLED transition on manager approval:
      - pre-declined users (unknown user/manager) are rejected (None);
      - pre-approved (manager-created) requests pass straight through;
      - otherwise the user is WAITLISTED until the manager approves, and
        rejected if the manager declined.

    Returns:
      Dict with 'status' (resulting state or None) and 'rule_tags'.
    """
    if rules.IsPredictionMode():
      return {'status': target_state, 'rule_tags': [self.key]}
    return_status = target_state  # By default accept transition.
    rule_tag = None
    if target_state == utils.RegistrationStatus.ENROLLED:
      if self._IsPreDeclined():
        return_status = None  # Non google.com account or no manager.
      elif not self._IsPreApproved():
        rule_tag = self._GetRuleTag()
        approval = self._GetUsableApproval()
        if approval is None:  # No usable approval, workflow to be initiated.
          return_status = utils.RegistrationStatus.WAITLISTED
          # Flag consumed later by ProcessOutcome to start the workflow.
          self._check_and_create_approval = True
        elif not approval.manager_decision:  # Manager did not decide.
          return_status = utils.RegistrationStatus.WAITLISTED
        elif not approval.approved:  # Manager decided and declined.
          return_status = None
    rule_tags = [self.key]
    if rule_tag is not None:
      rule_tags.append(rule_tag)
    return {'status': return_status, 'rule_tags': rule_tags}
  def ProcessOutcome(self, eval_state, final_state):
    """Process the result of rule evaluation to manage rule state.

    Offline only: if Evaluate() waitlisted the user and flagged that a new
    approval is needed (_check_and_create_approval), start the workflow.
    """
    if self.online: return  # Nothing to do during online mode.
    if (final_state == utils.RegistrationStatus.WAITLISTED and
        self._check_and_create_approval):
      assert eval_state == utils.RegistrationStatus.WAITLISTED
      # Initiate the manager approval workflow.
      self._CheckAndInitiateApprovalProcess()
  # Suppress pylint unused argument for overriden method
  # pylint: disable-msg=W0613
  @classmethod
  def TagsToReprocessOnChange(cls, rule_config, program_or_activity=None):
    """Overrides parent method.

    Registrations tagged with this rule's config key are reprocessed when
    the rule configuration changes.
    """
    return [rule_config.key]

  @classmethod
  def GetDescription(cls):
    # Human-readable, translatable description shown for this rule.
    return _('Needs manager approval.')
class TimeCancelByAccessPointTag(rules.RuleRegister):
  """Enforces a time limit for late registration cancels by access point tag.

  This rule enforces a time limit after which users will not be able to
  unregister from a particular access point tag.

  Attributes:
    time_to_activity: Time in seconds until activity starts.
    access_point_tags: List of access point tags for this rule.

  Example:
    TimeCancelByAccessPointTag(3600) will allow users to unregister until up
    to 1 hour before the activity starts.
  """

  def __init__(self, time_to_activity, access_point_tags=None,
               *args, **kargs):
    super(TimeCancelByAccessPointTag, self).__init__(*args, **kargs)
    self.time_to_activity = time_to_activity
    self.access_point_tags = access_point_tags

  def Evaluate(self, initial_state, target_state):
    """Overrides parent method.

    Only applies to ENROLLED -> UNREGISTERED transitions whose access points
    match the configured tags; outside the cancel window the transition is
    denied by returning initial_state.
    """
    if (target_state == utils.RegistrationStatus.UNREGISTERED
        and initial_state == utils.RegistrationStatus.ENROLLED):
      ap_keys = [ap.key() for ap in self.eval_context.access_point_list]
      aps = _GetRelevantAccessPointKeys(ap_keys, self.access_point_tags)
      if aps:
        # this rule applies
        start_time = self.eval_context.activity.start_time
        activity_start = time.mktime(start_time.timetuple())
        deadline = activity_start - self.time_to_activity
        if time.mktime(self.eval_context.queue_time.timetuple()) < deadline:
          return {'status': utils.RegistrationStatus.UNREGISTERED}
        else:
          # Past the deadline: keep the user enrolled.
          return {'status': initial_state}
    # this rule does not apply
    return {'status': target_state}

  @classmethod
  def GetDescription(cls):
    return _('Unregister deadline for attending location type.')
class EmployeeTypeRestriction(rules.RuleRegister):
  """Restricts enrollment based on employee type.

  Attributes:
    employee_types: List of utils.EmployeeType.XXX choices.
  """

  def __init__(self, employee_types,
               *args, **kargs):
    super(EmployeeTypeRestriction, self).__init__(*args, **kargs)
    self.employee_types = employee_types

  # Supress pylint unused argument, overriding parent method
  # pylint: disable-msg=W0613
  def Evaluate(self, initial_state, target_state):
    """Overrides parent method.

    Allows ENROLLED only when the user's employee type, looked up through
    the user info service, is one of self.employee_types. Service failures
    waitlist online requests and re-raise offline ones.
    """
    if target_state == STATUS.ENROLLED:
      # Retrieve employee type from user service
      exception = None
      person = None
      email = self.eval_context.user.email
      try:
        user_service = service_factory.GetUserInfoService()
        person = user_service.GetUserInfoMulti([email]).get(email)
      # Suppress pylint catch Exception
      # pylint: disable-msg=W0703
      except errors.ServiceCriticalError as err:
        # Bind to a separate name: Python 3 deletes the 'as' target when
        # the except block exits, but 'exception' is inspected below.
        exception = err
        logging.error('[%s] %s', type(err), err)
      # In dev we just let the user to be enrolled. (No user service in dev).
      if not appenginepatcher.on_production_server:
        return {'status': STATUS.ENROLLED}
      if exception is not None:  # Prod user info service problems.
        if self.online:  # Production online case, we waitlist.
          return {'status': STATUS.WAITLISTED}
        # Production offline case, we raise exception.
        logging.info('User[%s] lookup failed', email)
        raise exception
      # Prod mode, no exception and didn't find user using user hr service.
      if person is None:
        logging.info('Can not lookup user [%s]',
                     self.eval_context.user)
        return {'status': None}  # Not allowed if cannot lookup user.
      logging.info('Person type for %s is %s, allowing only %s',
                   email, person.employee_type, self.employee_types)
      if person.employee_type in self.employee_types:
        return {'status': STATUS.ENROLLED}
      # Not allowed.
      return {'status': None}
    # this rule does not apply
    return {'status': target_state}

  @classmethod
  def GetDescription(cls):
    return _('Restricted by employee types.')
def _UpdateQueryMode(query, offline):
"""Updates a query filters based on offline mode.
Args:
query: The query to be updated.
offline: A boolean to indicate offline mode.
Returns:
A list of queries that give the relevant registrations
"""
if offline:
query.filter('status =', utils.RegistrationStatus.ENROLLED)
query.filter('confirmed =', utils.RegistrationConfirm.PROCESSED)
return query
def _RegistrationNeedsAccounting(reg, offline):
  """Returns True if the given registration needs to be accounted.

  This method can further filter the registrations that are relevant to a
  query in memory. Can be used for filtering that aren't easy to make on the
  datastore and are best done in memory. For completeness the offline logic
  of _UpdateQueryMode is replicated here so this function is usable on its
  own even without filtering at the datastore query level.

  Args:
    reg: User registration
    offline: A Boolean to indicate offline mode

  Returns:
    True if the given user registration needs to be taken into account when
    building a context.
  """
  if offline:
    # Offline mode: only processed enrollments count.
    return (reg.status == utils.RegistrationStatus.ENROLLED and
            reg.confirmed == utils.RegistrationConfirm.PROCESSED)
  # Online mode: a registration takes up resources when it is active ...
  is_active = reg.active == utils.RegistrationActive.ACTIVE
  # ... or is an enrollment in a transition/temporary state ...
  in_transition = (reg.status == utils.RegistrationStatus.ENROLLED and
                   reg.confirmed == utils.RegistrationConfirm.NOT_READY)
  # ... or is an unregistration not yet processed offline. Such entries were
  # marked inactive by unregisterOnline but still hold resources until the
  # offline process deletes the register-unregister entity group.
  pending_unregister = (reg.status == utils.RegistrationStatus.UNREGISTERED and
                        reg.confirmed == utils.RegistrationConfirm.READY)
  return is_active or in_transition or pending_unregister
def _CanRegisterTimeWindows(initial_state, queue_time, start_time, end_time):
"""Checks if user is allowed to register based on given time window.
If a user tries to register before the time window opens, the user is placed
on the waiting list.
Args:
initial_state: Initial state of user when registering.
queue_time: Datetime of the user request.
start_time: Datetime date of registration window.
end_time: Datetime end of registration window.
Returns:
A rules.RuleResultRegister.STATUS_XXX value or initial_state if outside
time window.
"""
if queue_time > start_time and queue_time < end_time:
value = utils.RegistrationStatus.ENROLLED
else:
format = '%Y-%m-%d %I:%M%p'
logging.debug('Can not register in time window [%s - %s] for queue time %s',
start_time.strftime(format), end_time.strftime(format),
queue_time.strftime(format))
value = initial_state
return value
def _GetAccessPointsWithTags(access_point_tags):
  """Returns access points which contain every tag from the input.

  Args:
    access_point_tags:
      A list of strings representing access point tags, with a maximum of 30
      entries.

  Returns:
    An iterator of AccessPoint such that every AccessPoint contains all the
    tags specified in access_point_tags.
  """
  query = models.AccessPoint.all()
  for tag in access_point_tags:
    # We need access points which have EVERY tag from access_point_tags.
    # The IN <list> clause in appengine returns entries which have ANY of the
    # tags in the <list>. So we need to build multiple IN clauses to get a
    # full match. (The 30-entry limit above comes from the datastore's cap
    # on subqueries — TODO confirm.)
    query.filter('tags in ', [tag])
  return query
class LockPastActivity(rules.RuleRegister):
  """Locks registrations for activities in the past."""

  def __init__(self, *args, **kargs):
    super(LockPastActivity, self).__init__(*args, **kargs)

  def Evaluate(self, initial_state, target_state):
    """Overrides parent method."""
    if not self.online:
      # Offline mode accepts every transition unchanged.
      return {'status': target_state}
    started = self.eval_context.queue_time > self.eval_context.activity.start_time
    # Once the activity has started, deny any state change by echoing the
    # initial state; otherwise accept the transition.
    return {'status': initial_state if started else target_state}

  @classmethod
  def GetDescription(cls):
    return _('Registrations locked for past activities.')
|
|
#!/usr/bin/env python
#
# Joint copyright:
# - Copyright 2012,2013 Wikimedia Foundation
# - Copyright 2012,2013 Antoine "hashar" Musso
# - Copyright 2013 Arnaud Fabre
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import codecs
import logging
import os
import re
import doctest
import json
import operator
import testtools
from testtools.content import text_content
import xml.etree.ElementTree as XML
from six.moves import configparser
from six.moves import StringIO
from yaml import safe_dump
# This dance deals with the fact that we want unittest.mock if
# we're on Python 3.4 and later, and non-stdlib mock otherwise.
try:
from unittest import mock
except ImportError:
import mock # noqa
import jenkins_jobs.local_yaml as yaml
from jenkins_jobs.parser import YamlParser
from jenkins_jobs.xml_config import XmlJob
from jenkins_jobs.modules import (project_flow,
project_matrix,
project_maven,
project_multijob)
def get_scenarios(fixtures_path, in_ext='yaml', out_ext='xml',
                  plugins_info_ext='plugins_info.yaml',
                  filter_func=None):
    """Return a list of testscenarios-style scenarios from fixture files.

    Each scenario is an ``(input_filename, attributes)`` tuple, where the
    attributes dict carries the input fixture filename plus the matching
    expected-output, config and plugins-info filenames (``None`` when the
    companion file does not exist, in which case an empty file is assumed).

    Args:
        fixtures_path: directory tree to walk for fixtures.
        in_ext: extension of input fixture files.
        out_ext: extension of expected-output files.
        plugins_info_ext: suffix identifying plugins-info fixtures, which
            are companions rather than inputs.
        filter_func: optional callable; inputs for which it returns a truthy
            value are skipped.
    """
    scenarios = []
    files = []
    for dirpath, dirs, fs in os.walk(fixtures_path):
        files.extend([os.path.join(dirpath, f) for f in fs])

    input_files = [f for f in files if re.match(r'.*\.{0}$'.format(in_ext), f)]

    for input_filename in input_files:
        if input_filename.endswith(plugins_info_ext):
            continue

        if callable(filter_func) and filter_func(input_filename):
            continue

        output_candidate = re.sub(r'\.{0}$'.format(in_ext),
                                  '.{0}'.format(out_ext), input_filename)
        # assume empty file if no output candidate found
        if output_candidate not in files:
            output_candidate = None

        plugins_info_candidate = re.sub(r'\.{0}$'.format(in_ext),
                                        '.{0}'.format(plugins_info_ext),
                                        input_filename)
        if plugins_info_candidate not in files:
            plugins_info_candidate = None

        # Use in_ext (was a hard-coded '.yaml') so config files are found
        # for non-default input extensions too.
        conf_candidate = re.sub(r'\.{0}$'.format(in_ext), '.conf',
                                input_filename)
        # If present, add the configuration file
        if conf_candidate not in files:
            conf_candidate = None

        scenarios.append((input_filename, {
            'in_filename': input_filename,
            'out_filename': output_candidate,
            'conf_filename': conf_candidate,
            'plugins_info_filename': plugins_info_candidate,
        }))

    return scenarios
class BaseTestCase(object):
    """Scenario-driven fixture machinery shared by the concrete test classes.

    Subclasses combine this with a testtools.TestCase and set ``scenarios``
    (from get_scenarios) plus ``klass`` for module-level rendering tests.
    """
    scenarios = []
    fixtures_path = None

    # TestCase settings:
    maxDiff = None  # always dump text difference
    longMessage = True  # keep normal error message when providing our

    logging.basicConfig()

    def _read_utf8_content(self):
        """Return the expected-output fixture as unicode ('' when absent)."""
        # if None assume empty file
        if self.out_filename is None:
            return u""

        # Read XML content, assuming it is unicode encoded. Use a context
        # manager so the handle is closed deterministically instead of
        # leaking until GC.
        with codecs.open(self.out_filename, 'r', 'utf-8') as xml_file:
            return u"%s" % xml_file.read()

    def _read_yaml_content(self, filename):
        """Parse and return the YAML content of *filename*."""
        with open(filename, 'r') as yaml_file:
            yaml_content = yaml.load(yaml_file)
        return yaml_content

    def test_yaml_snippet(self):
        """Render the YAML fixture with the module under test and diff it
        against the expected XML fixture."""
        if not self.in_filename:
            return

        if self.conf_filename is not None:
            config = configparser.ConfigParser()
            # readfp(open(...)) leaked the file handle; close it explicitly.
            with open(self.conf_filename) as conf_file:
                config.readfp(conf_file)
        else:
            config = {}

        expected_xml = self._read_utf8_content()
        yaml_content = self._read_yaml_content(self.in_filename)

        # Pick the project type matching the fixture, defaulting to a
        # freestyle <project> root element.
        project = None
        if ('project-type' in yaml_content):
            if (yaml_content['project-type'] == "maven"):
                project = project_maven.Maven(None)
            elif (yaml_content['project-type'] == "matrix"):
                project = project_matrix.Matrix(None)
            elif (yaml_content['project-type'] == "flow"):
                project = project_flow.Flow(None)
            elif (yaml_content['project-type'] == "multijob"):
                project = project_multijob.MultiJob(None)

        if project:
            xml_project = project.root_xml(yaml_content)
        else:
            xml_project = XML.Element('project')

        plugins_info = None
        if self.plugins_info_filename is not None:
            plugins_info = self._read_yaml_content(self.plugins_info_filename)
            self.addDetail("plugins-info-filename",
                           text_content(self.plugins_info_filename))
            self.addDetail("plugins-info",
                           text_content(str(plugins_info)))

        parser = YamlParser(config, plugins_info)

        pub = self.klass(parser.registry)

        # Generate the XML tree directly with modules/general
        pub.gen_xml(parser, xml_project, yaml_content)

        # Prettify generated XML
        pretty_xml = XmlJob(xml_project, 'fixturejob').output().decode('utf-8')

        self.assertThat(
            pretty_xml,
            testtools.matchers.DocTestMatches(expected_xml,
                                              doctest.ELLIPSIS |
                                              doctest.NORMALIZE_WHITESPACE |
                                              doctest.REPORT_NDIFF)
        )
class SingleJobTestCase(BaseTestCase):
    """Renders a whole job definition (not a single module) and diffs it."""

    def test_yaml_snippet(self):
        expected_xml = self._read_utf8_content()

        if self.conf_filename:
            config = configparser.ConfigParser()
            # readfp(open(...)) leaked the file handle; close it explicitly.
            with open(self.conf_filename) as conf_file:
                config.readfp(conf_file)
        else:
            config = None

        parser = YamlParser(config)
        parser.parse(self.in_filename)

        # Generate the XML tree
        parser.expandYaml()
        parser.generateXML()

        # Deterministic output order regardless of YAML ordering.
        parser.xml_jobs.sort(key=operator.attrgetter('name'))

        # Prettify generated XML
        pretty_xml = u"\n".join(job.output().decode('utf-8')
                                for job in parser.xml_jobs)

        self.assertThat(
            pretty_xml,
            testtools.matchers.DocTestMatches(expected_xml,
                                              doctest.ELLIPSIS |
                                              doctest.NORMALIZE_WHITESPACE |
                                              doctest.REPORT_NDIFF)
        )
class JsonTestCase(BaseTestCase):
    """Checks the JSON serialization of a YAML fixture against a fixture."""

    def test_yaml_snippet(self):
        expected = self._read_utf8_content()
        parsed = self._read_yaml_content(self.in_filename)
        actual = json.dumps(parsed, indent=4, separators=(',', ': '))

        matcher = testtools.matchers.DocTestMatches(
            expected,
            doctest.ELLIPSIS |
            doctest.NORMALIZE_WHITESPACE |
            doctest.REPORT_NDIFF)
        self.assertThat(actual, matcher)
class YamlTestCase(BaseTestCase):
    """Checks the normalized YAML dump of a fixture against a fixture."""

    def test_yaml_snippet(self):
        expected = self._read_utf8_content()
        parsed = self._read_yaml_content(self.in_filename)

        # Round-trip through JSON: this forces expansion of YAML anchors and
        # aliases so the dumped output shows their net effect rather than
        # the literal input.
        expanded = json.load(StringIO(json.dumps(parsed)))
        actual = safe_dump(expanded, default_flow_style=False)

        matcher = testtools.matchers.DocTestMatches(
            expected,
            doctest.ELLIPSIS |
            doctest.NORMALIZE_WHITESPACE |
            doctest.REPORT_NDIFF)
        self.assertThat(actual, matcher)
|
|
import ply.yacc as yacc
from parseractions import *
from fetchlexer import tokens
class ParserError(Exception):
    """Raised when the fetch DSL input cannot be parsed."""

    def __init__(self, msg):
        # Pass the message to Exception so str(exc) shows it (the original
        # only set self.msg, leaving str(exc) empty). Keep .msg for callers.
        super(ParserError, self).__init__(msg)
        self.msg = msg
############
# Main rules
##########

# PLY operator-precedence table for filter expressions. Per PLY convention,
# entries later in the tuple bind more tightly, so BANG (negation)
# out-precedes AND/OR; AND and OR are left-associative at equal precedence.
precedence = (
    ('left', 'AND', 'OR'),
    ('right', 'BANG'),
)
# NOTE: in ply.yacc the docstring of every p_* function IS the grammar
# production — do not edit these docstrings without changing the grammar.
def p_fetchsection(p):
    """fetchcode : fetchlines"""
    p[0] = p[1]

def p_fetchlines(p):
    """fetchlines : fetchline fetchlines
                | fetchline"""
    # Accumulate fetchlines into a flat list of actions.
    p[0] = [p[1]]
    if len(p) > 2:
        p[0] += p[2]

def p_fetchline_modify(p):
    """fetchline : requestline
                 | paramline
                 | headerline
                 | cookieline
                 | filterline
                 | outputline"""
    p[0] = p[1]
def p_error(p):
    """Reports a syntax error by raising ParserError.

    PLY calls this with ``p is None`` when the error occurs at end of
    input; handle that case instead of crashing with AttributeError.
    """
    if p is None:
        raise ParserError("Syntax error: unexpected end of input")
    error_msg = "Invalid token: '%s' at line %d" % (p.value, p.lineno)
    raise ParserError(error_msg)
############
# Fetch section
##########

def p_get(p):
    "get : LARROW"
    # '<-' arrow maps to an HTTP GET request.
    p[0] = "GET"

def p_post(p):
    "post : RARROW"
    # '->' arrow maps to an HTTP POST request.
    p[0] = "POST"

def p_fetchline_fetch(p):
    """requestline : NAME get STRING NEWLINE
                   | NAME post STRING NEWLINE"""
    # NAME is the variable bound to the fetch; p[2] is "GET"/"POST".
    p[0] = FetchAction(p[1], p[2], p[3])

def p_paramline(p):
    """paramline : NAME LBRACE NAME RBRACE EQUALS STRING NEWLINE"""
    # name[param] = "value" — adds a URL/query parameter.
    p[0] = ModifyUrlAction(p[1], "PARAM", p[3], p[6])

def p_headerline(p):
    """headerline : NAME LCURLY NAME RCURLY EQUALS STRING NEWLINE"""
    # name{header} = "value" — adds an HTTP header.
    p[0] = ModifyUrlAction(p[1], "HEADER", p[3], p[6])

def p_cookieline(p):
    """cookieline : NAME LT NAME GT EQUALS STRING NEWLINE"""
    # name<cookie> = "value" — adds a cookie.
    p[0] = ModifyUrlAction(p[1], "COOKIE", p[3], p[6])
############
# Filter section
##########

def p_filterline_coarse(p):
    """filterline : NAME EQUALS LBRACE coarsefilterexpression RBRACE NAME NEWLINE"""
    # target = [expr] source — coarse filters support !, AND, OR combinators.
    p[0] = CoarseFilterAction(p[1], p[4], p[6])

def p_filterline_fine(p):
    """filterline : NAME EQUALS LCURLY filterexpression RCURLY NAME NEWLINE"""
    # target = {expr} source — fine filters take a single basic expression.
    p[0] = FineFilterAction(p[1], p[4], p[6])

def p_filterexpression(p):
    """filterexpression : NAME COLON STRING"""
    p[0] = BasicFilterExpression(p[1], p[3])

def p_filterexpression_noarg(p):
    """filterexpression : NAME"""
    # Filter keyword without an argument string.
    p[0] = BasicFilterExpression(p[1])

def p_coarsefilterexpression(p):
    """coarsefilterexpression : filterexpression"""
    p[0] = p[1]

def p_coarsefilterexpression_neg(p):
    """coarsefilterexpression : BANG coarsefilterexpression"""
    p[0] = NegFilterExpression(p[2])

def p_coarsefilterexpression_combined(p):
    """coarsefilterexpression : coarsefilterexpression AND coarsefilterexpression
                              | coarsefilterexpression OR coarsefilterexpression"""
    # p[2] carries the operator token so the action knows AND vs OR.
    p[0] = CombinedFilterExpression(p[1],p[3],p[2])
############
# Output section
##########

def p_outputline(p):
    """outputline : NAME EQUALS outputright NEWLINE"""
    p[0] = OutputAssignment(p[1], p[3])

def p_outputline_dict(p):
    """outputline : NAME LBRACE STRING RBRACE EQUALS outputright NEWLINE"""
    # name["key"] = value — assignment into a dict entry.
    p[0] = OutputAssignment(DictAt(p[1], p[3]), p[6])

def p_outputright(p):
    """outputright : NAME
                   | STRING
                   | outputdict"""
    p[0] = p[1]

def p_outputright_arrayitem(p):
    """outputright : NAME LBRACE NUMBER RBRACE"""
    # name[index] — list element access.
    p[0] = ListAt(p[1], p[3])

def p_outputright_expression(p):
    """outputright : NAME PLUS NAME"""
    # name + name — list concatenation.
    p[0] = ListPlus(p[1], p[3])

def p_outputright_list(p):
    """outputright : LBRACE outputlistitems RBRACE"""
    p[0] = p[2]

def p_outputlistitems_single(p):
    """outputlistitems : STRING"""
    p[0] = [p[1]]

def p_outputlistitems_multiple(p):
    """outputlistitems : STRING COMMA outputlistitems"""
    # Right recursion: prepend the head item to the already-built tail.
    p[0] = [p[1]] + p[3]

def p_outputdict(p):
    """outputdict : DICT LCURLY outputdictitems RCURLY"""
    p[0] = p[3]

def p_outputdictitems_single(p):
    """outputdictitems : STRING COLON NAME"""
    p[0] = {p[1]: p[3]}

def p_outputdictitems_multiple(p):
    """outputdictitems : STRING COLON NAME COMMA outputdictitems"""
    # Merge the head pair into the dict built from the tail.
    d = p[5]
    d[p[1]] = p[3]
    p[0] = d
############
# API
##########

def parse_input(i):
    """Parses fetch DSL source text *i* and returns the parse result.

    Builds a fresh parser and lexer per call; yacc.yacc() regenerates (and
    caches) its parse tables from the p_* rules in this module.
    """
    import fetchlexer
    parser = yacc.yacc()
    lexer = fetchlexer.get_lexer()
    result = parser.parse(i, lexer=lexer)
    return result
############
# Print parser rules
##########

# NOTE(review): this block uses Python 2 syntax (print statements,
# func_name, builtin reduce) — it will not run under Python 3 as-is.
if __name__ == "__main__":
    # Manually curated listing of the grammar rules, grouped by section,
    # used only for pretty-printing the grammar below.
    RULES = [
        ("Main parsing", [p_fetchsection, p_fetchlines, p_fetchline_modify]),
        ("Fetch section", [p_fetchline_fetch, p_paramline, p_headerline, p_cookieline, p_get, p_post]),
        ("Filter section", [p_filterline_coarse, p_filterline_fine, p_filterexpression,
            p_filterexpression_noarg, p_coarsefilterexpression, p_coarsefilterexpression_neg,
            p_coarsefilterexpression_combined]),
        ("Output section", [p_outputline, p_outputline_dict, p_outputright, p_outputright_arrayitem,
            p_outputright_expression, p_outputright_list, p_outputlistitems_single,
            p_outputlistitems_multiple, p_outputdict, p_outputdictitems_single,
            p_outputdictitems_multiple])]

    ENCOUNTERED_RULES = set() # Used for merging rules.

    def rule_format(docstring, lsize):
        # Reformats a p_* docstring: left-justify the rule name to lsize
        # columns and collapse repeated rule names into '|' continuations.
        rvals = []
        for line in docstring.split("\n"):
            if ":" in line:
                separator = ":"
                before, after = line.split(":")
                rulename = before.strip()
                if rulename in ENCOUNTERED_RULES:
                    before = ""
                    separator = "|"
                ENCOUNTERED_RULES.add(rulename)
                rvals.append(before.ljust(lsize) + separator + after)
            elif "|" in line:
                before, after = line.split("|")
                rvals.append(" ".ljust(lsize) + "|" + after)
        return "\n".join(rvals)

    def verify_with_global_rules(): #shittycode
        # Warn when the hand-maintained RULES list above drifts out of sync
        # with the p_* functions actually defined in this module.
        glob = set(x for x in globals() if x.startswith("p_") and not x.startswith("p_error"))
        local = set(z.func_name for z in reduce(lambda x,y: x + y, map(lambda x:x[-1], RULES)))
        if glob != local:
            print "--WARNING WARNING WARNING WARNING WARNING WARNING--"
            print "--    Rules mismatch! Update fetchparser.py      --"
            print "--WARNING WARNING WARNING WARNING WARNING WARNING--"

    def print_grammar():
        print "Fetch context-free grammar:\n"
        for section,rules in RULES:
            print section + ":"
            for rule in rules:
                print rule_format(rule.__doc__, 25)
            print ""

    # Print the context-free grammar in a nice readable format.
    print_grammar()

    # Compare the manually listed rules being printed with all rules in this file.
    verify_with_global_rules()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
from six.moves.urllib import parse as urlparse
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
LOG = logging.getLogger(__name__)
class SwiftContainer(resource.Resource):
    """A resource for managing Swift containers.

    A container defines a namespace for objects. An object with the same
    name in two different containers represents two different objects.
    """

    PROPERTIES = (
        NAME, X_CONTAINER_READ, X_CONTAINER_WRITE, X_CONTAINER_META,
        X_ACCOUNT_META, PURGE_ON_DELETE,
    ) = (
        'name', 'X-Container-Read', 'X-Container-Write', 'X-Container-Meta',
        'X-Account-Meta', 'PurgeOnDelete',
    )

    ATTRIBUTES = (
        DOMAIN_NAME, WEBSITE_URL, ROOT_URL, OBJECT_COUNT, BYTES_USED,
        HEAD_CONTAINER,
    ) = (
        'DomainName', 'WebsiteURL', 'RootURL', 'ObjectCount', 'BytesUsed',
        'HeadContainer',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the container. If not specified, a unique name will '
              'be generated.')
        ),
        X_CONTAINER_READ: properties.Schema(
            properties.Schema.STRING,
            _('Specify the ACL permissions on who can read objects in the '
              'container.')
        ),
        X_CONTAINER_WRITE: properties.Schema(
            properties.Schema.STRING,
            _('Specify the ACL permissions on who can write objects to the '
              'container.')
        ),
        X_CONTAINER_META: properties.Schema(
            properties.Schema.MAP,
            _('A map of user-defined meta data to associate with the '
              'container. Each key in the map will set the header '
              'X-Container-Meta-{key} with the corresponding value.'),
            default={}
        ),
        X_ACCOUNT_META: properties.Schema(
            properties.Schema.MAP,
            _('A map of user-defined meta data to associate with the '
              'account. Each key in the map will set the header '
              'X-Account-Meta-{key} with the corresponding value.'),
            default={}
        ),
        PURGE_ON_DELETE: properties.Schema(
            properties.Schema.BOOLEAN,
            _("If True, delete any objects in the container "
              "when the container is deleted. "
              "Otherwise, deleting a non-empty container "
              "will result in an error."),
            default=False,
            support_status=support.SupportStatus(
                version='2015.1')
        ),
    }

    attributes_schema = {
        DOMAIN_NAME: attributes.Schema(
            _('The host from the container URL.'),
            type=attributes.Schema.STRING
        ),
        WEBSITE_URL: attributes.Schema(
            _('The URL of the container.'),
            type=attributes.Schema.STRING
        ),
        ROOT_URL: attributes.Schema(
            _('The parent URL of the container.'),
            type=attributes.Schema.STRING
        ),
        OBJECT_COUNT: attributes.Schema(
            _('The number of objects stored in the container.'),
            type=attributes.Schema.INTEGER
        ),
        BYTES_USED: attributes.Schema(
            _('The number of bytes stored in the container.'),
            type=attributes.Schema.INTEGER
        ),
        HEAD_CONTAINER: attributes.Schema(
            _('A map containing all headers for the container.'),
            type=attributes.Schema.MAP
        ),
    }

    default_client_name = 'swift'

    def physical_resource_name(self):
        # Prefer the user-supplied container name; otherwise let the base
        # class generate a unique one.
        name = self.properties[self.NAME]
        if name:
            return name

        return super(SwiftContainer, self).physical_resource_name()

    @staticmethod
    def _build_meta_headers(obj_type, meta_props):
        """Returns a new dict of Swift metadata headers.

        Each key of the new dict is prepended with
        "X-<obj_type.title()>-Meta-", e.g. "X-Container-Meta-" or
        "X-Account-Meta-". Returns {} when meta_props is None.
        """
        if meta_props is None:
            return {}
        return dict(
            ('X-' + obj_type.title() + '-Meta-' + k, v)
            for (k, v) in meta_props.items())

    def handle_create(self):
        """Create a container."""
        container = self.physical_resource_name()

        container_headers = SwiftContainer._build_meta_headers(
            "container", self.properties[self.X_CONTAINER_META])

        account_headers = SwiftContainer._build_meta_headers(
            "account", self.properties[self.X_ACCOUNT_META])

        # ACL headers go on the container verbatim (no Meta- prefix).
        for key in (self.X_CONTAINER_READ, self.X_CONTAINER_WRITE):
            if self.properties[key] is not None:
                container_headers[key] = self.properties[key]

        LOG.debug('SwiftContainer create container %(container)s with '
                  'container headers %(container_headers)s and '
                  'account headers %(account_headers)s'
                  % {'container': container,
                     'account_headers': account_headers,
                     'container_headers': container_headers})

        self.client().put_container(container, container_headers)

        if account_headers:
            self.client().post_account(account_headers)

        self.resource_id_set(container)

    def _get_objects(self):
        """List the container's objects; None when the container is gone."""
        try:
            container, objects = self.client().get_container(self.resource_id)
        except Exception as ex:
            # Re-raises anything that is not a not-found error.
            self.client_plugin().ignore_not_found(ex)
            return None
        return objects

    def _deleter(self, obj=None):
        """Delete the underlying container or an object inside it."""
        args = [self.resource_id]
        if obj:
            deleter = self.client().delete_object
            args.append(obj['name'])
        else:
            deleter = self.client().delete_container
        # Already-deleted container/object is treated as success.
        with self.client_plugin().ignore_not_found:
            deleter(*args)

    def handle_delete(self):
        """Start deletion; returns the remaining object count (or None).

        The return value becomes the check_delete_complete() token: None
        means nothing to do, an int is the number of objects still to purge.
        """
        if self.resource_id is None:
            return

        objects = self._get_objects()

        if objects:
            if self.properties[self.PURGE_ON_DELETE]:
                self._deleter(objects.pop())  # save first container refresh
            else:
                msg = _("Deleting non-empty container (%(id)s) "
                        "when %(prop)s is False") % {
                            'id': self.resource_id,
                            'prop': self.PURGE_ON_DELETE}
                raise exception.ResourceActionNotSupported(action=msg)

        # objects is either None (container is gone already) or (empty) list
        if objects is not None:
            objects = len(objects)
        return objects

    def check_delete_complete(self, objects):
        """Purge remaining objects one per poll, then delete the container."""
        if objects is None:  # resource was not created or is gone already
            return True

        if objects:  # integer >=0 from the first invocation
            objs = self._get_objects()
            if objs is None:
                return True  # container is gone already
            if objs:
                self._deleter(objs.pop())
                if objs:  # save one last _get_objects() API call
                    return False

        self._deleter()
        return True

    def handle_check(self):
        # A failing HEAD/GET marks the resource unhealthy.
        self.client().get_container(self.resource_id)

    def get_reference_id(self):
        return six.text_type(self.resource_id)

    def _resolve_attribute(self, key):
        """Resolve one of ATTRIBUTES from the endpoint URL or a HEAD call."""
        parsed = list(urlparse.urlparse(self.client().url))
        if key == self.DOMAIN_NAME:
            return parsed[1].split(':')[0]
        elif key == self.WEBSITE_URL:
            return '%s://%s%s/%s' % (parsed[0], parsed[1], parsed[2],
                                     self.resource_id)
        elif key == self.ROOT_URL:
            return '%s://%s%s' % (parsed[0], parsed[1], parsed[2])
        elif self.resource_id and key in (
                self.OBJECT_COUNT, self.BYTES_USED, self.HEAD_CONTAINER):
            try:
                headers = self.client().head_container(self.resource_id)
            except Exception as ex:
                # Client errors resolve to None; anything else propagates.
                if self.client_plugin().is_client_exception(ex):
                    LOG.warning(_LW("Head container failed: %s"), ex)
                    return None
                raise
            else:
                if key == self.OBJECT_COUNT:
                    return headers['x-container-object-count']
                elif key == self.BYTES_USED:
                    return headers['x-container-bytes-used']
                elif key == self.HEAD_CONTAINER:
                    return headers

    def _show_resource(self):
        return self.client().head_container(self.resource_id)

    def get_live_resource_data(self):
        # Augment the container headers with the account headers so
        # parse_live_resource_data can reconstruct X-Account-Meta values.
        resource_data = super(SwiftContainer, self).get_live_resource_data()
        account_data = self.client().head_account()
        resource_data['account_data'] = account_data
        return resource_data

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Map live Swift headers back onto this resource's properties."""
        swift_reality = {}
        # swift container name can't be updated
        swift_reality.update({self.NAME: resource_properties.get(self.NAME)})
        # PURGE_ON_DELETE property is used only on the heat side and isn't
        # passed to swift, so update it from existing resource properties
        swift_reality.update({self.PURGE_ON_DELETE: resource_properties.get(
            self.PURGE_ON_DELETE)})
        swift_reality.update({self.X_CONTAINER_META: {}})
        swift_reality.update({self.X_ACCOUNT_META: {}})
        for key in [self.X_CONTAINER_READ, self.X_CONTAINER_WRITE]:
            swift_reality.update({key: resource_data.get(key.lower())})
        # Only keys already present in the template properties are compared;
        # extra live metadata is ignored.
        for key in resource_properties.get(self.X_CONTAINER_META):
            prefixed_key = "%(prefix)s-%(key)s" % {
                'prefix': self.X_CONTAINER_META.lower(),
                'key': key.lower()}
            if prefixed_key in resource_data:
                swift_reality[self.X_CONTAINER_META].update(
                    {key: resource_data[prefixed_key]})
        for key in resource_properties.get(self.X_ACCOUNT_META):
            prefixed_key = "%(prefix)s-%(key)s" % {
                'prefix': self.X_ACCOUNT_META.lower(),
                'key': key.lower()}
            if prefixed_key in resource_data['account_data']:
                swift_reality[self.X_ACCOUNT_META].update(
                    {key: resource_data['account_data'][prefixed_key]})
        return swift_reality
def resource_mapping():
    """Map Heat resource type names to their implementing classes."""
    mapping = {'OS::Swift::Container': SwiftContainer}
    return mapping
|
|
from __future__ import division, print_function
import os
import numpy as np
from astropy.table import Table, hstack
from lsst.pipe.base import Struct
from ..sep_stepper import SepLsstStepper, sep_ellipse_mask
from ..stats import get_clipped_sig_task
from ..utils import pixscale, zpt
from .. import utils
from .. import imtools
from .. import primitives as prim
from ..cattools import xmatch
__all__ = ['run']
def run(cfg, reset_mask_planes=False):
    """
    Run hugs pipeline using SExtractor for the final detection
    and photometry.

    Parameters
    ----------
    cfg : hugs_pipe.Config
        Configuration object which stores all params
        as well as the exposure object.
    reset_mask_planes : bool, optional
        If True, reset the exposure's mask planes after a successful run.

    Returns
    -------
    results : lsst.pipe.base.Struct
        Object containing results:
        results.all_detections : catalog of all detections
        results.sources : catalog of sources we are keeping
        results.hugs_exp : exposure object for this run
        results.exp_clean : cleaned exposure object for this run
        results.success : boolean flag of run status
    """
    assert cfg.tract and cfg.patch, 'No patch id given!'
    cfg.timer  # start timer

    ############################################################
    # Get masked image and check if we have enough good data
    ############################################################

    try:
        mi = cfg.exp[cfg.band_detect].getMaskedImage()
        mask = mi.getMask()

        if cfg.band_mask != cfg.band_detect:
            cfg.logger.info('making mask using {}-band'.format(cfg.band_mask))
            mi_band_mask = cfg.exp[cfg.band_mask].getMaskedImage().clone()
        else:
            mi_band_mask = mi

        stat_task = get_clipped_sig_task()

        cfg.logger.info('good data fraction = {:.2f}'.
                        format(cfg.exp.patch_meta.good_data_frac))
        if cfg.exp.patch_meta.good_data_frac < cfg.min_good_data_frac:
            msg = '***** not enough data in {} {} {}-band!!! ****'
            # BUG FIX: this check is on the detection band's patch, so
            # report band_detect (the original logged band_mask here).
            cfg.logger.warning(
                msg.format(cfg.tract, cfg.patch, cfg.band_detect))
            results = _null_return(cfg)
            return results

        if cfg.band_mask != cfg.band_detect:
            min_good = cfg.min_good_data_frac
            if cfg.exp.good_data_fraction(cfg.band_mask) < min_good:
                msg = '***** not enough data in {} {} {}-band!!! ****'
                cfg.logger.warning(
                    msg.format(cfg.tract, cfg.patch, cfg.band_mask))
                results = _null_return(cfg)
                return results

        ############################################################
        # Image thresholding at low and high thresholds. In both
        # cases, the image is smoothed at the psf scale.
        ############################################################

        stats = stat_task.run(mi_band_mask)
        if stats.stdev <= 0.0 or np.isnan(stats.stdev):
            msg = '***** {} | {} -- stddev = {} !!! ****'
            cfg.logger.warning(msg.format(cfg.tract, cfg.patch, stats.stdev))
            results = _null_return(cfg)
            return results

        if cfg.thresh_type.lower() == 'sb':
            cfg.logger.info('thresh type set to ' + cfg.thresh_type)
            # convert surface-brightness thresholds into units of the
            # clipped image stddev expected by image_threshold
            low_th = cfg.thresh_low['thresh']
            flux_th = 10**(0.4 * (zpt - low_th)) * pixscale**2
            cfg.thresh_low['thresh'] = flux_th / stats.stdev
            high_th = cfg.thresh_high['thresh']
            flux_th = 10**(0.4 * (zpt - high_th)) * pixscale**2
            cfg.thresh_high['thresh'] = flux_th / stats.stdev
        elif cfg.thresh_type.lower() == 'stddev':
            cfg.logger.info('thresh type set to ' + cfg.thresh_type)
        else:
            raise Exception('invalid threshold type')

        cfg.logger.info('performing low threshold at '
                        '{:.2f} sigma'.format(cfg.thresh_low['thresh']))
        fpset_low = prim.image_threshold(
            mi_band_mask, mask=mask, plane_name='THRESH_LOW',
            **cfg.thresh_low)
        cfg.logger.info('performing high threshold at '
                        '{:.2f} sigma'.format(cfg.thresh_high['thresh']))
        # return value unused, but the call sets the THRESH_HIGH mask plane
        fpset_high = prim.image_threshold(
            mi_band_mask, mask=mask, plane_name='THRESH_HIGH',
            **cfg.thresh_high)

        ############################################################
        # Get "cleaned" image, with noise replacement
        ############################################################

        cfg.logger.info('generating cleaned exposure')
        exp_clean = prim.clean(cfg.exp[cfg.band_detect],
                               fpset_low,
                               **cfg.clean)

        ############################################################
        # Remove small sources using HSC pipeline detection map
        ############################################################

        if cfg.hsc_small_sources_r_max is not None:
            if cfg.hsc_small_sources_r_max > 0:
                cfg.logger.info('removed small sources with HSC '
                                'detection map')
                exp_clean = prim.remove_small_sources_thresholding(
                    exp_clean, cfg.hsc_small_sources_r_max, cfg.rng)

        mi_clean = exp_clean.getMaskedImage()
        mask_clean = mi_clean.getMask()

        ############################################################
        # use sep to find and mask point-like sources
        ############################################################

        if cfg.sep_steps is not None:
            sep_stepper = SepLsstStepper(config=cfg.sep_steps)
            sep_stepper.setup_image(exp_clean, cfg.rng)

            step_mask = cfg.exp.get_mask_array(
                planes=['BRIGHT_OBJECT', 'NO_DATA', 'SAT'])
            sep_sources, _ = sep_stepper.run('sep_point_sources',
                                             mask=step_mask)

            cfg.logger.info('generating and applying sep ellipse mask')
            r_min = cfg.sep_min_radius
            sep_sources = sep_sources[sep_sources['flux_radius'] < r_min]
            ell_msk = sep_ellipse_mask(
                sep_sources, sep_stepper.image.shape, cfg.sep_mask_grow)
            # replace masked pixels with noise and flag them as SMALL
            nimage_replace = sep_stepper.noise_image[ell_msk]
            mi_clean.getImage().getArray()[ell_msk] = nimage_replace
            mask_clean.addMaskPlane('SMALL')
            mask_clean.getArray()[ell_msk] += \
                mask_clean.getPlaneBitMask('SMALL')

        ############################################################
        # Detect sources and measure props with SExtractor
        ############################################################

        cfg.logger.info('detecting in {}-band'.format(cfg.band_detect))
        label = '{}-{}-{}'.format(cfg.tract, cfg.patch[0], cfg.patch[-1])

        cfg.logger.info('cleaning non-detection bands')
        replace = cfg.exp.get_mask_array(cfg.band_detect)
        for band in cfg.bands:
            if band != cfg.band_detect:
                mi_band = cfg.exp[band].getMaskedImage()
                noise_array = utils.make_noise_image(mi_band, cfg.rng)
                mi_band.getImage().getArray()[replace] = noise_array[replace]

        sources = Table()
        for band in cfg.bands:
            cfg.logger.info('measuring in {}-band'.format(band))
            dual_exp = None if band == cfg.band_detect else cfg.exp[band]
            sources_band = prim.detect_sources(
                exp_clean, cfg.sex_config, cfg.sex_io_dir, label=label,
                dual_exp=dual_exp,
                delete_created_files=cfg.delete_created_files,
                original_fn=cfg.exp.fn[cfg.band_detect])
            if len(sources_band) > 0:
                sources = hstack([sources, sources_band])
            else:
                # logger.warn is deprecated; use warning
                cfg.logger.warning('**** no sources found by '
                                   'sextractor ****')
                results = _null_return(cfg, exp_clean)
                return results

        ############################################################
        # Verify detections in other bands using SExtractor
        ############################################################

        all_detections = sources.copy()
        for band in cfg.band_verify:
            # typo fix: 'dection' -> 'detection'
            cfg.logger.info('verifying detection in {}-band'.format(band))
            sources_verify = prim.detect_sources(
                cfg.exp[band], cfg.sex_config, cfg.sex_io_dir,
                label=label, delete_created_files=cfg.delete_created_files,
                original_fn=cfg.exp.fn[band])
            if len(sources_verify) > 0:
                match_masks, _ = xmatch(
                    sources, sources_verify, max_sep=cfg.verify_max_sep)
                txt = 'cuts: {} out of {} objects detected in {}-band'.format(
                    len(match_masks[0]), len(sources), band)
                cfg.logger.info(txt)
                if len(match_masks[0]) == 0:
                    cfg.logger.warning(
                        '**** no matched sources with ' + band + ' ****')
                    results = _null_return(cfg, exp_clean)
                    return results
                sources = sources[match_masks[0]]
            else:
                cfg.logger.warning(
                    '**** no sources detected in ' + band + ' ****')
                results = _null_return(cfg, exp_clean)
                return results

        mask_fracs = utils.calc_mask_bit_fracs(exp_clean)
        cfg.exp.patch_meta.small_frac = mask_fracs['small_frac']
        cfg.exp.patch_meta.cleaned_frac = mask_fracs['cleaned_frac']
        cfg.exp.patch_meta.bright_obj_frac = mask_fracs['bright_object_frac']

        # typo fix: 'mophology' -> 'morphology'
        cfg.logger.info('measuring morphology metrics')
        prim.measure_morphology_metrics(exp_clean.getImage().getArray(),
                                        sources)

        cfg.logger.info('task completed in {:.2f} min'.format(cfg.timer))
        results = Struct(all_detections=all_detections,
                         sources=sources,
                         hugs_exp=cfg.exp,
                         exp_clean=exp_clean,
                         success=True,
                         synths=cfg.exp.synths)
        if reset_mask_planes:
            cfg.reset_mask_planes()
        return results
    except Exception:
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit and discarded the traceback.
        # exc_info=True logs the traceback (cfg.logger exposes both
        # .warn and .warning above, i.e. the stdlib logging interface).
        cfg.logger.critical(
            'tract - patch {} - {} failed'.format(cfg.tract, cfg.patch),
            exc_info=True)
        results = _null_return(cfg)
        return results
def _null_return(config, exp_clean=None):
    """Reset the mask planes and build the failure-case result struct."""
    config.reset_mask_planes()
    return Struct(
        all_detections=None,
        sources=None,
        hugs_exp=config.exp,
        exp_clean=exp_clean,
        success=False,
        synths=None,
    )
|
|
# -*- coding: utf-8 -*-
"""Provide authentication and authorization policy."""
__all__ = [
'VALID_API_KEY',
'VALID_PASS_KEY',
'VALID_TOKEN',
'APIKeyAuthenticationPolicy',
'BearerAuthenticationPolicy',
'GlobalKeyAuthorizationPolicy',
'HybridAuthenticationPolicy',
'PassKeyAuthenticationPolicy',
'get_bearer_token',
]
import logging
logger = logging.getLogger(__name__)
import urlparse
import re
import zope.interface as zi
from paste import httpheaders
from pyramid import authentication
from pyramid import interfaces
from pyramid import security
from . import bearer
from . import model
# Namespace prefix for token keys stored outside this module.
TOKEN_PREFIX = u'simpleauth.policy.token:'

# Access tokens are lowercase alphanumeric strings of exactly
# model.ACCESS_TOKEN_LENGTH characters.
VALID_TOKEN = re.compile(r'^[a-z0-9]{' + str(model.ACCESS_TOKEN_LENGTH) + '}$')
# API keys and pass keys are 40 word characters (e.g. a hex digest).
VALID_API_KEY = re.compile(r'^\w{40}$')
VALID_PASS_KEY = re.compile(r'^\w{40}$')
def get_bearer_token(request):
    """Return the bearer token from the ``Authorization`` header.

    Returns ``None`` when the header is missing, malformed, or uses an
    auth scheme other than ``Bearer``.
    """
    # Compose.
    http_auth = httpheaders.AUTHORIZATION
    # (removed: the original bound `prefix = TOKEN_PREFIX` and never used it)

    # Try and get the bearer token data from the auth header.
    token = None
    try:
        auth_method, data = http_auth(request.environ).split(' ', 1)
    except ValueError:  # not enough values to unpack
        pass
    else:
        if auth_method.lower() == 'bearer':
            token = data.strip()
    return token
@zi.implementer(interfaces.IAuthenticationPolicy)
class BearerAuthenticationPolicy(authentication.CallbackAuthenticationPolicy):
    """Authenticate using a `bearer token`_.

      _`bearer token`: https://tools.ietf.org/html/rfc6750#section-2.1
    """

    def __init__(self, prefix='auth.', callback=None, debug=False, **kwargs):
        self.callback = callback
        self.prefix = prefix or ''
        # NOTE(review): uses the raw ``prefix`` (not self.prefix), so
        # passing prefix=None raises TypeError -- confirm intended.
        self.userid_key = prefix + 'userid'
        self.debug = debug
        self.get_token = kwargs.get('get_token', get_bearer_token)
        self.get_canonical_id_cls = kwargs.get(
                'get_canonical_id_cls', bearer.GetCanonicalIDFromBearerToken)

    def forget(self, request):
        """Noop: bearer tokens carry no headers to unset."""
        return []

    def remember(self, request, principal, **kw):
        """Noop: bearer tokens carry no headers to set."""
        return []

    def unauthenticated_userid(self, request):
        """Resolve a canonical id from the request's bearer token, if any."""
        token = self.get_token(request)
        if not token:
            return None
        lookup_utility = self.get_canonical_id_cls(request)
        return lookup_utility(token)
@zi.implementer(interfaces.IAuthenticationPolicy)
class HybridAuthenticationPolicy(authentication.SessionAuthenticationPolicy):
    """First try `bearer token`_, then try `session`_.

      _`bearer token`: https://tools.ietf.org/html/rfc6750#section-2.1
      _`session`: http://pyramid.readthedocs.org/en/latest/narr/sessions.html
    """

    def __init__(self, prefix='auth.', callback=None, debug=False, **kwargs):
        self.callback = callback
        self.prefix = prefix or ''
        # NOTE(review): uses the raw ``prefix`` (not self.prefix), so
        # passing prefix=None raises TypeError -- confirm intended.
        self.userid_key = prefix + 'userid'
        self.debug = debug
        self.get_token = kwargs.get('get_token', get_bearer_token)
        self.get_canonical_id_cls = kwargs.get(
                'get_canonical_id_cls', bearer.GetCanonicalIDFromBearerToken)

    def unauthenticated_userid(self, request):
        """Resolve the userid: bearer token first, session as fallback."""
        canonical_id = None
        token = self.get_token(request)
        if token:
            canonical_id = self.get_canonical_id_cls(request)(token)
        if canonical_id:
            return canonical_id
        # Fall back on the userid stored in the session, if any.
        return request.session.get(self.userid_key)
@zi.implementer(interfaces.IAuthenticationPolicy)
class APIKeyAuthenticationPolicy(authentication.CallbackAuthenticationPolicy):
    """Authenticate from an api key carried in a request header
    (``request.headers[header_key]``).
    """

    def __init__(self, header_key, **kwargs):
        self.header_key = header_key
        self.valid_key = kwargs.get('valid_key', VALID_API_KEY)

    def unauthenticated_userid(self, request):
        """Return the validated api key found in ``request.headers``."""
        candidate = request.headers.get(self.header_key, None)
        if candidate and self.valid_key.match(candidate):
            return candidate.decode('utf8')

    def remember(self, request, principal, **kw):
        """A no-op. There's no way to remember the user.

          >>> policy = APIKeyAuthenticationPolicy(None)
          >>> policy.remember('req', 'ppl')
          []
        """
        return []

    def forget(self, request):
        """A no-op. There's no user to forget.

          >>> policy = APIKeyAuthenticationPolicy(None)
          >>> policy.forget('req')
          []
        """
        return []
@zi.implementer(interfaces.IAuthenticationPolicy)
class PassKeyAuthenticationPolicy(authentication.BasicAuthAuthenticationPolicy):
    """Use the basic-auth password as the user id, provided it is a
    well-formed pass key.
    """

    def __init__(self, **kwargs):
        # default check: accept any credentials, principal list = [password]
        self.check = kwargs.get('check', lambda u, p, r: [p])
        self.realm = kwargs.get('realm', 'Realm')
        self.debug = kwargs.get('debug', False)

    def unauthenticated_userid(self, request):
        """Return the basic auth password if present and valid."""
        credentials = self._get_credentials(request)
        if not credentials:
            return None
        pass_key = credentials[1]
        if pass_key and VALID_PASS_KEY.match(pass_key):
            return pass_key
@zi.implementer(interfaces.IAuthorizationPolicy)
class GlobalKeyAuthorizationPolicy(object):
    """Authorize by membership of one global key in the principals list,
    ignoring the context entirely.
    """

    def __init__(self, key):
        self.key = key

    def permits(self, context, principals, permission):
        """Ignore *context* and *permission*; check only key membership."""
        return self.key in principals

    def principals_allowed_by_permission(self, context, permission):
        """Reverse lookup is not supported by this policy."""
        raise NotImplementedError
|
|
"""Contains classes solr_query, node and sisters."""
# -*- coding: utf-8 -*-
from urllib.request import urlopen
import simplejson
import json
import numpy as np
import pandas as pd
import contextlib
# import copy
class solr_query():
    """
    A solr_query class that stores URLs.

    Attbts:
    solr_url - the main solr_url
    query - the query string appended to solr_url
    """

    def __init__(self, solr_url, query):
        """Initialize the solr_query object."""
        self.solr_url = solr_url
        self.query = query

    def set_solr_url(self, url):
        """Assign a url to the solr_query object."""
        self.solr_url = url

    def add_query_url(self, url):
        """Add a query url to the solr_query object."""
        self.query = url

    def open_query(self, p=0, timeout=10):
        """
        Open ``solr_url + query`` and return the JSON-decoded response.

        Tries at most 10 times per URL; raises ``Warning`` if every
        attempt fails. A truthy ``p`` prints the URL before fetching.
        """
        url = self.solr_url + self.query
        if p:
            print(url)
        # don't try more than 10x per website
        for _ in range(10):
            try:
                with contextlib.closing(urlopen(url,
                                                timeout=timeout)) as conn:
                    return simplejson.load(conn)
            except Exception:
                # BUG FIX: the original bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit. Treat any fetch/parse
                # error as a retryable failure.
                continue
        # all attempts failed
        print(url)
        raise Warning('Url could not be contacted or is invalid')
class node():
    """
    A node is intended to be a single ontology term.

    Attributes:
    name - wbbt id
    daughters, parents - related term ids
    genes - gene ids annotated to this term
    similarity - no. of genes assoc. with this node divided
    by the set of genes of its sister set
    drop - whether to drop or not
    good_name - human readable plus wbbt

    QUERIES FOR RELATIONS AND GENES ARE LAMBDA FUNCTIONS:
    query_relation(x) -- gets families of tissue x
    query_genes(x) -- gets genes assoc with x
    """

    def __init__(self, name):
        """Create an empty node for the term `name`."""
        self.name = name
        self.daughters = []
        self.parents = []
        self.genes = []
        self.similarity = 0
        self.drop = False
        self.good_name = ''

    def get_name(self, human_readable):
        """Generate a good name (human readable + WBid)."""
        if human_readable == '':
            print('warning, empty human readable name')
        self.good_name = '{0} {1}'.format(human_readable, self.name)

    def add_daughter(self, daughter):
        """Record `daughter` as a child, ignoring duplicates."""
        if daughter not in self.daughters:
            self.daughters.append(daughter)

    def add_parent(self, parent):
        """Record `parent`, ignoring duplicates."""
        if parent not in self.parents:
            self.parents.append(parent)

    def add_annotation(self, gene):
        """Record an annotating gene, ignoring duplicates."""
        if gene not in self.genes:
            self.genes.append(gene)

    def throw_away(self):
        """Mark this node for removal."""
        self.drop = True

    def calc_similarity(self, sim):
        """Store the similarity score for this node."""
        self.similarity = sim

    def find_family(self, solr_url, query_relation, p=0):
        """
        Find the family for this node by using solr_url and query_relation.

        query_relation(x) -- lambda function
        """
        rsp_rlshp = solr_query(solr_url, query_relation(self.name))
        doc = rsp_rlshp.open_query(p=p)['response']['docs'][0]
        # the topology graph holds the edges between terms; if the edge
        # object isn't this wbbt, the object is a parent of this wbbt,
        # otherwise this wbbt is the parent of the edge subject
        for edge in json.loads(doc['topology_graph_json'])['edges']:
            if self.name != edge['obj']:
                self.add_parent(edge['obj'])
            else:
                self.add_daughter(edge['sub'])

    def find_genes(self, solr_url, query_genes):
        """
        For a given wbbt, find the genes associated with it.

        query_genes(x) -- lambda function!
        """
        rsp_genes = solr_query(solr_url, query_genes(self.name))
        for entry in rsp_genes.open_query()['response']['docs']:
            self.add_annotation(entry['id'][3:])  # strip leading 'WB:'
class sisters(object):
    """
    A sister object that contains related terms.

    A sister object is meant to contain a set of terms that are related.
    Sisters are defined as a set of nodes that share a single parent.
    If a node is multiparent, it can have as many different sister sets
    as parents.

    Attributes:
    parent -- the parent for this set
    sisters -- set of `node` objects that are related by the same parent
    geneset -- total set of genes associated with these sisters
    threshold -- similarity threshold above which sisters must be killed
    dropsisters -- boolean (0/1)
    dropped -- an array that keeps track of all sisters ever dropped
    """

    def __init__(self, parent, threshold):
        """Initialize function."""
        self.parent = parent
        self.sisters = []
        self.geneset = []
        self.threshold = threshold
        self.dropsisters = 0
        self.dropped = []

    def add_sister(self, sister):
        """Add a single sister and fold its genes into the gene set."""
        self.sisters.append(sister)
        self.geneset = list(set(self.geneset + sister.genes))

    def add_sisters(self, sisters):
        """Add multiple sisters."""
        self.sisters = list(set(self.sisters + sisters))
        for sister in sisters:
            self.geneset = self.geneset + sister.genes
        self.geneset = list(set(self.geneset))

    def add_dropped(self, sister):
        """Add a sister (or list of sisters) to the `dropped` list.

        BUG FIX: the original tested ``sister not in list`` -- membership
        against the builtin ``list`` type -- which always raises
        TypeError. The intent (append a single sister if absent, extend
        with a list of sisters) is implemented here.
        """
        if isinstance(sister, list):
            self.dropped = self.dropped + sister
        elif sister not in self.dropped:
            self.dropped.append(sister)

    def calc_similarity(self, method):
        """
        Calculate the family wise similarity for this object.

        A method to calculate the similarity of a set of sisters to each
        other by finding the cardinality of the total gene set and the
        cardinality of the gene set for each node.

        Depending on the method ('avg' or 'any'), the sisters.dropsisters
        value is modified if the sisters are too similar to each other.
        """
        if len(self.sisters) == 0:
            return 0
        # BUG FIX: the original compared the geneset *list* to 0, which
        # is never true, allowing a ZeroDivisionError below when every
        # sister has an empty gene list.
        if len(self.geneset) == 0:
            return 1
        if method not in ['avg', 'any']:
            raise ValueError('method must be one of \'avg\' or \'any\'')
        avg = 0
        for sister in self.sisters:
            sim = len(sister.genes)/len(self.geneset)
            sister.calc_similarity(sim)
            if method == 'any':
                if sim > self.threshold:
                    self.dropsisters = 1
            avg += sim
        avg = avg/len(self.sisters)
        if method == 'avg':
            if avg > self.threshold:
                self.dropsisters = 1

    def kill(self):
        """If dropsisters variable is 1, set `dropped` = `sisters`."""
        if self.dropsisters == 1:
            self.dropped = self.sisters

    def trim(self, val):
        """If a sister doesn't have `val` genes assoc. with it, drop it."""
        if len(self.sisters) == 0:
            return
        for sister in self.sisters:
            if len(sister.genes) < val:
                self.dropped.append(sister)
class ontology():
    """An ontological object.

    Holds every fetched `node` of one ontology, the sister (family)
    structure between nodes, and the bookkeeping of dropped vs.
    surviving terms used to build the final dictionary.
    """

    def __init__(self, name, cutoff, threshold, method, solr_url):
        """Initialization function."""
        self.name = name
        self.threshold = threshold  # similarity threshold for sister sets
        self.method = method  # 'avg' or 'any'; forwarded to sisters
        self.nodes = {}  # wbbt id -> node
        self.family = {}  # parent wbbt id -> sisters object
        self.solr_url = solr_url
        self.query_min_cutoff = 5  # min gene count for a term to be fetched
        self.cutoff = cutoff  # min annotations for a term to survive
        self.dropped = {}  # wbbt id -> node removed from the dictionary
        self.good = {}  # good_name -> surviving node

    def set_min_cutoff(self, x):
        """Set minimum gene cutoff below which nodes are not fetched."""
        self.query_min_cutoff = x

    def add_nodes(self, query_terms, query_readable):
        """Add nodes from solr database."""
        sq = solr_query(self.solr_url, query_terms(self.query_min_cutoff))
        rsp_terms = sq.open_query()
        sd = solr_query(self.solr_url, query_readable)
        rsp_read = sd.open_query()
        i = 0
        # The solr facet list alternates term id and count, so only every
        # other entry (i % 2 == 0) is a term id.
        for k in enumerate(rsp_terms['facet_counts']
                           ['facet_fields']['regulates_closure']):
            if i % 2 == 0:
                n = node(k[1])
                if n.name not in self.nodes:
                    self.nodes[n.name] = n
                    # NOTE(review): this passes the query *string* as the
                    # human-readable name; the loop below overwrites it
                    # for every term found in rsp_read -- confirm intent.
                    self.nodes[n.name].get_name(query_readable)
                if n.name not in self.family:
                    self.family[n.name] = sisters(n.name, self.threshold)
            i += 1
        # Attach the real human-readable labels from the readable query.
        for k, val in enumerate(rsp_read['response']['docs']):
            if val['id'] not in self.nodes:
                continue
            self.nodes[val['id']].get_name(val['annotation_class_label'])

    def find_node_family(self, lambda_query_rlshp, p=0):
        """Find the nodes that are related to this one."""
        for n in iter(self.nodes):
            self.nodes[n].find_family(self.solr_url, lambda_query_rlshp, p=p)

    def find_node_annotations(self, lambda_query_genes):
        """Fetch the annotations for this node."""
        for n in iter(self.nodes):
            self.nodes[n].find_genes(self.solr_url, lambda_query_genes)
            # terms with too few annotating genes are marked as dropped
            if len(self.nodes[n].genes) < self.cutoff:
                self.dropped[self.nodes[n].name] = self.nodes[n]

    def annotate_nodes(self, lambda_query_rlshp, lambda_query_genes):
        """Annotate this node with a family and with annotations."""
        self.find_node_family(lambda_query_rlshp)
        self.find_node_annotations(lambda_query_genes)

    def find_families(self):
        """Figure out the family structure for each node."""
        for node in self.nodes:
            n = self.nodes[node]
            for daughter in n.daughters:
                if daughter not in self.nodes:
                    continue
                # if 'WBbt:0002367' == daughter:
                #     print('hi')
                # NOTE(review): this compares a gene *count* against
                # self.threshold (the similarity threshold, a float);
                # self.cutoff looks like the intended bound -- confirm.
                if len(self.nodes[daughter].genes) < self.threshold:
                    # add sister
                    self.family[n.name].add_sister(self.nodes[daughter])
                    # place it in sister.dropped
                    self.family[n.name].add_dropped(self.nodes[daughter])
                    # but also in self.dropped
                    self.dropped[n.name] = n
                else:
                    self.family[n.name].add_sister(self.nodes[daughter])

    def calculate_similarities(self):
        """Calculate the family-wise similarity."""
        for parent in self.family:
            self.family[parent].calc_similarity(self.method)

    def kill(self):
        """Remove whatever nodes fulfill the sisters.kill criterion."""
        for parent in self.family:
            self.family[parent].kill()
            for killed in self.family[parent].dropped:
                if killed.name in self.nodes:
                    self.dropped[killed.name] = killed

    def ceiling(self):
        """If a node has all its complement of daughters, kill it."""
        for parent in self.family:
            if parent not in self.nodes:
                continue
            if len(self.family[parent].sisters) == 0:
                continue
            # all daughters survived, so the parent is redundant
            if len(self.family[parent].dropped) == 0:
                self.dropped[self.nodes[parent].name] = self.nodes[parent]

    def find_good(self):
        """Fetch the surviving nodes."""
        for node in self.nodes:
            if node not in self.dropped:
                self.good[self.nodes[node].good_name] = self.nodes[node]
def build_dictionary(wbbts, tissue_array, genes):
    """Build the gene x tissue membership DataFrame.

    Given a mapping of tissues to node objects, mark mat[i, j] = 1 when
    gene i is annotated to tissue j, then wrap the matrix in a DataFrame
    with a leading 'wbid' column of gene ids.
    """
    gene_row = {gene: i for i, gene in enumerate(genes)}
    mat = np.zeros(shape=(len(genes), len(wbbts)))
    for col, tissue in enumerate(wbbts):
        for gene in wbbts[tissue].genes:
            mat[gene_row[gene], col] = 1
    df = pd.DataFrame(mat, columns=tissue_array)
    df.insert(0, 'wbid', genes)
    # drop the root term, for some reason it causes problems with hgt
    root_term = 'C. elegans Cell and Anatomy WBbt:0000100'
    if root_term in df.columns:
        df.drop(root_term, axis=1, inplace=True)
    return df
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# # # # # # # #
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
if __name__ == '__main__':
    # Command-line entry point: fetch an ontology from solr, prune
    # redundant/low-annotation terms, and write the gene x term
    # dictionary to a CSV file.
    #
    # Raymond:
    # I have split up the URLs into 2 different variables to make life easier
    # solr_url contains the first part
    # query_xxx contains the second. However, query_xx can be a lambda function
    # basically, at a point in the string, I have written something like...
    # 'select?qt=standard&indent={0}'.format(x) -- the {0} is replaced by x
    # this allows me to modify the query in predictable ways.
    # hope this is clear.
    import argparse
    import sys

    parser = argparse.ArgumentParser(description='Run Dictionary Maker')
    parser.add_argument("threshold", help='The redundancy threshold',
                        type=float)
    parser.add_argument('cutoff', help='The annotation cutoff for each term',
                        type=int)
    parser.add_argument("fname",
                        help='Filename (complete with path) to save to',
                        type=str)
    parser.add_argument("-m", '--method',
                        help='method - defaults to \'any\' if not specified',
                        type=str)
    parser.add_argument("-mc", '--mincutoff',
                        help='The minimum cutoff to fetch. Defaults to 2.',
                        type=int)
    parser.add_argument("-su", '--solrurl',
                        help='The main body of the solr url.', type=str)
    # NOTE(review): the help text names `phenotype`/`tissue`/`gene` but
    # the valid choices are anatomy/phenotype/go -- confirm wording.
    parser.add_argument("-o", "--ontology",
                        help='One of `phenotype`, `tissue` or `gene`. Only\
                        works if --solrurl has not been specified',
                        type=str, default='anatomy',
                        choices=['anatomy', 'phenotype', 'go'])
    args = parser.parse_args()

    # main solr url
    if args.solrurl:
        solr_url = args.solrurl
    else:
        # solr_url = 'http://wobr.caltech.edu:8082/solr/anatomy/'
        s = 'http://wobr.caltech.edu:8082/solr/{0}/'
        solr_url = s.format(args.ontology)

    # queries must be lambda functions
    # query for terms. Finds terms that have x or more annotating genes
    def query_terms(x, ontology=args.ontology):
        """Search solr for terms (nodes) in the ontology."""
        # GO needs an extra taxon filter because the WormBase GO solr
        # database includes all worm species.
        if ontology != 'go':
            s = 'select?qt=standard&indent=on&wt=json&version=2.2&fl=' +\
                'id&start=0&rows=0&q=document_category:bioentity' +\
                '&facet=true&facet.field=regulates_closure&' +\
                'facet.limit=-1&facet.mincount={0}&facet.sort' +\
                '=count&fq=source:%22WB%22&fq=-qualifier:%22not%22'
        else:
            s = 'select?qt=standard&indent=on&wt=json&version=2.2&fl=' +\
                'id&start=0&rows=1&q=document_category:bioentity&facet=' +\
                'true&facet.field=regulates_closure&facet.limit=-1&' +\
                'facet.mincount={0}&facet.sort=count&fq=source:%22WB' +\
                '%22&fq=taxon:%22NCBITaxon:6239%22&fq=-qualifier:%22not%22'
        return s.format(x)

    def query_relation(x, ontology=args.ontology):
        """
        query for relationships between nodes.

        given a wbbt ID `x`, find the nodes connected to it.
        Links are slightly different for [anatomy, phenotype] and GO,
        because in WormBase, the GO solr database includes all other
        worm species as well.
        """
        if ontology != 'go':
            s = "select?qt=standard&fl=topology_graph_json&" +\
                "version=2.2&wt=json&indent=on&rows=1&q=id:" +\
                "%22{0}%22&fq=document_category:%22ontology_class%22"
        else:
            s = "select?qt=standard&fl=topology_graph_json&" +\
                "version=2.2&wt=json&indent=on&rows=1&q=id:" +\
                "%22{0}%22&fq=document_category:%22ontology_class%22"
        return s.format(x)

    def query_genes(x, ontology=args.ontology):
        """
        find the genes associated with every node.

        given a wbbt ID `x`, open URL that contains genes assoc. with it.
        """
        if ontology != 'go':
            s = "select?qt=standard&indent=on&wt=json&version=2.2&" +\
                "fl=id&start=0&rows=10000&q=document_category:bioentity" +\
                "&fq=source:%22WB%22&fq=-qualifier:%22not%22&" +\
                "fq=regulates_closure:%22{0}%22"
        else:
            s = "select?qt=standard&indent=on&wt=json&version=2.2&" +\
                "fl=id&start=0&rows=10000&q=document_category:bioentity" +\
                "&fq=source:%22WB%22&fq=taxon:%22NCBITaxon:6239%22" +\
                "&fq=-qualifier:%22not%22&" +\
                "fq=regulates_closure:%22{0}%22"
        return s.format(x)

    # query for readable names
    query_readable = "select?qt=standard&fl=id,annotation_class_label" +\
                     "&version=2.2&wt=json&indent=on&rows=100000&q=id:" +\
                     "*&fq=document_category:ontology_class&" +\
                     "fq=-is_obsolete:true"

    queries = [query_terms, query_relation, query_genes, query_readable]
    threshold = args.threshold
    cutoff = args.cutoff

    if args.method:
        method = args.method
    else:
        method = 'any'
    if args.mincutoff:
        min_annot = args.mincutoff
    else:
        min_annot = 2

    # Build and prune the ontology, reporting progress on stdout.
    trial1 = ontology('tissue_ontology', cutoff, threshold, method, solr_url)
    print('Object made')
    print('Min cutoff set at: {0}....'.format(min_annot))
    sys.stdout.flush()
    trial1.set_min_cutoff(min_annot)
    print('Fetching nodes.....')
    sys.stdout.flush()
    trial1.add_nodes(query_terms, query_readable)
    print('Annotating nodes')
    sys.stdout.flush()
    trial1.find_node_annotations(query_genes)
    print('Finding node families...')
    sys.stdout.flush()
    trial1.find_node_family(query_relation)
    print('Generating node family representation...')
    sys.stdout.flush()
    trial1.find_families()
    message = 'Calculating similarities and \
removing nodes with more than {0:.2} similarity...'
    print(message.format(threshold))
    sys.stdout.flush()
    trial1.calculate_similarities()
    message = 'killing nodes that have less than {0} annotations...'
    print(message.format(cutoff))
    sys.stdout.flush()
    trial1.kill()
    print('Applying ceiling...')
    sys.stdout.flush()
    trial1.ceiling()
    print('Generating final list of terms...')
    trial1.find_good()
    print('No. of terms in dictionary: {0}'.format(len(trial1.good)))
    # extract keys
    print('Generating file at {0}'.format(args.fname))
    tissues = []
    genes = []
    for n in trial1.good:
        tissues.append(n)
        # print(n)
        genes = genes+trial1.good[n].genes
    genes = list(set(genes))
    df = build_dictionary(trial1.good, tissues, genes)
    df.to_csv(args.fname, index=False)
|
|
import sys
from flexmock import flexmock
import inject
from mcloud.events import EventBus
from mcloud.txdocker import IDockerClient, DockerTwistedClient
from mcloud.util import txtimeout
import pytest
from mcloud.remote import Server, Client, ApiError, Task, ApiRpcServer
from twisted.internet import reactor, defer
from twisted.python import log
import txredisapi as redis
class MockServer(Server):
    """Test double for Server that records the last received message."""

    # last message received from any client; None until one arrives
    message = None

    def on_message(self, client, message, isBinary=False):
        """Store the incoming message for later inspection by tests."""
        self.message = message
class MockClient(Client):
    """Test double for Client that records the last received message."""

    # last message received from the server; None until one arrives
    message = None

    def on_message(self, message, isBinary=False):
        """Store the incoming message for later inspection by tests."""
        self.message = message
def sleep(secs):
    """Return a Deferred that fires with None after `secs` seconds."""
    deferred = defer.Deferred()
    reactor.callLater(secs, deferred.callback, None)
    return deferred
#@pytest.inlineCallbacks
#def test_exchange():
# inject.clear()
#
# #log.startLogging(sys.stdout)
#
# server = MockServer(port=9999)
# server.bind()
#
# assert len(server.clients) == 0
#
# client = MockClient(port=9999)
# yield client.connect()
#
# assert len(server.clients) == 1
#
# log.msg('Sending data')
# yield client.send('boo')
#
# yield sleep(0.1)
#
# assert server.message == 'boo'
#
# yield server.clients[0].sendMessage('baz')
#
# yield sleep(0.1)
#
# assert client.message == 'baz'
#
# client.shutdown()
# server.shutdown()
#
# yield sleep(0.1)
@pytest.inlineCallbacks
def test_request_response():
    """Round-trip a 'ping' request through a real server/client pair."""
    # Reset dependency injection so bindings from earlier tests cannot leak in.
    inject.clear()

    def configure(binder):
        binder.bind('settings', None)
    inject.configure(configure)

    srv = Server(port=9998, no_ssl=True)
    srv.bind()

    cli = Client(port=9998, no_ssl=True)
    yield cli.connect()

    # 'ping' is a built-in command; the synchronous call must answer 'pong'.
    answer = yield cli.call_sync('ping')
    assert answer == 'pong'

    cli.shutdown()
    srv.shutdown()
@pytest.inlineCallbacks
def test_request_response_no_such_command():
    """Calling an unregistered command must surface as ApiError on the client."""
    #-----------------------------------
    # preparations
    #-----------------------------------

    # cleanup a bit
    inject.clear()

    def my_config(binder):
        binder.bind('settings', None)
    inject.configure(my_config)

    # Keep logging disabled here, consistent with the other tests in this
    # module — the stray startLogging call was spamming captured test output.
    # log.startLogging(sys.stdout)

    server = Server(port=9996, no_ssl=True)
    server.bind()

    client = Client(port=9996, no_ssl=True)
    yield client.connect()

    # 'hoho' is not registered on the server, so the sync call must fail.
    with pytest.raises(ApiError):
        yield client.call_sync('hoho')

    client.shutdown()
    server.shutdown()
@pytest.inlineCallbacks
def test_tasks():
    """End-to-end task lifecycle: start, progress events, completion, cleanup."""
    #-----------------------------------
    # preparations
    #-----------------------------------

    # cleanup a bit
    inject.clear()

    rc = yield redis.Connection(dbid=2)
    eb = EventBus(rc)
    yield eb.connect()

    def my_config(binder):
        binder.bind(redis.Connection, rc)
        binder.bind(EventBus, eb)
        binder.bind('settings', None)
    inject.configure(my_config)

    yield rc.flushdb()

    api = inject.instance(ApiRpcServer)

    #-----------------------------------
    # Test itself
    #-----------------------------------

    # this will emulate some long-running process
    task_defered = defer.Deferred()

    # this is the mock that will execute our long-running process; named
    # task_handler so it is not shadowed by the Task object created below
    task_handler = flexmock()
    task_handler.should_receive('foo').with_args(int, 123, 'test').once().and_return(task_defered)

    # register our task
    api.tasks['baz'] = task_handler.foo

    # start server -> real server on tcp port
    server = Server(port=9997, no_ssl=True)
    server.bind()

    # real client connecton here
    client = Client(port=9997, no_ssl=True)
    yield client.connect()

    # client calls a task
    task = Task('baz')
    yield client.call(task, 123, 'test')

    yield sleep(0.1)

    assert task.id > 0
    assert task.name == 'baz'
    assert task.is_running is True

    assert len(server.rpc_server.tasks_running) == 1
    assert server.rpc_server.tasks_running[task.id]['name'] == 'baz'
    assert len(server.rpc_server.task_list()) == 1

    # no data should be on client
    yield sleep(0.1)
    assert task.data == []
    assert task.response is None

    # now server sends some progress
    yield server.clients[0].send_event('task.progress.%s' % task.id, 'nami-nami')

    # and client should receive this data
    yield sleep(0.1)
    assert task.data == ['nami-nami']
    assert task.is_running is True
    assert task.response is None

    # now our long-running process stopped and returned some result
    # (payload string kept verbatim, typo and all — the assert below matches it)
    yield task_defered.callback('this is respnse')

    # and the client should receive this result
    yield sleep(0.1)
    assert task.data == ['nami-nami']
    assert task.is_running is False  # was '== False'; 'is' is the idiomatic singleton check
    assert task.response == 'this is respnse'

    assert len(server.rpc_server.tasks_running) == 0
    assert len(server.rpc_server.task_list()) == 0

    #-----------------------------------
    # Cleanup
    #-----------------------------------
    client.shutdown()
    server.shutdown()
    yield sleep(0.1)
@pytest.inlineCallbacks
def test_task_terminate():
    """A running task stops when the client requests its termination."""
    #-----------------------------------
    # preparations
    #-----------------------------------

    # cleanup a bit
    inject.clear()

    rc = yield redis.Connection(dbid=2)
    eb = EventBus(rc)
    yield eb.connect()

    def my_config(binder):
        binder.bind(redis.Connection, rc)
        binder.bind(EventBus, eb)
        binder.bind('settings', None)
    inject.configure(my_config)

    yield rc.flushdb()

    api = inject.instance(ApiRpcServer)

    #-----------------------------------
    # Test itself
    #-----------------------------------

    # this will emulate some long-running process
    task_defered = defer.Deferred()

    # this is the mock that will execute our long-running process; named
    # task_handler so it is not shadowed by the Task object created below
    task_handler = flexmock()
    task_handler.should_receive('foo').with_args(int, 123, 'test').once().and_return(task_defered)

    # register our task
    api.tasks['baz'] = task_handler.foo

    # start server -> real server on tcp port
    server = Server(port=9987, no_ssl=True)
    server.bind()

    # real client connecton here
    client = Client(port=9987, no_ssl=True)
    yield client.connect()

    # client calls a task
    task = Task('baz')
    yield client.call(task, 123, 'test')

    yield sleep(0.1)

    assert task.id > 0
    assert task.name == 'baz'
    assert task.is_running is True

    # now client terminates the task
    yield sleep(0.1)
    client.terminate_task(task.id)
    yield sleep(0.1)

    assert task.is_running is False

    #-----------------------------------
    # Cleanup
    #-----------------------------------
    client.shutdown()
    server.shutdown()
    yield sleep(0.1)
|
|
"""
PlexAPI MyPlex
"""
import plexapi, requests
from plexapi import TIMEOUT, log, utils
from plexapi.exceptions import BadRequest, NotFound, Unauthorized
from plexapi.utils import cast, toDatetime
from requests.status_codes import _codes as codes
from threading import Thread
from xml.etree import ElementTree
class MyPlexUser(object):
    """ Logs into my.plexapp.com to fetch account and token information. This is
        useful to get a token if not on the local network.
    """
    SIGNIN = 'https://my.plexapp.com/users/sign_in.xml'

    def __init__(self, data, initpath=None):
        """Build the user from the attributes of the sign-in XML element."""
        self.initpath = initpath
        self.email = data.attrib.get('email')
        self.id = data.attrib.get('id')
        self.thumb = data.attrib.get('thumb')
        self.username = data.attrib.get('username')
        self.title = data.attrib.get('title')
        self.cloudSyncDevice = data.attrib.get('cloudSyncDevice')
        # Token used by resources()/devices() for authenticated plex.tv calls.
        self.authenticationToken = data.attrib.get('authenticationToken')
        self.queueEmail = data.attrib.get('queueEmail')
        self.queueUid = data.attrib.get('queueUid')

    def resources(self):
        """Return the MyPlexResource objects registered to this account."""
        return MyPlexResource.fetch_resources(self.authenticationToken)

    def getResource(self, search, port=32400):
        """ Searches resources by server.name (case-insensitive exact match)
            from the list of those available for this PlexUser.
        """
        return _findResource(self.resources(), search, port)

    def devices(self):
        """Return the MyPlexDevice objects registered to this account."""
        return MyPlexDevice.fetch_resources(self.authenticationToken)

    @classmethod
    def signin(cls, username, password):
        """Sign in with HTTP basic auth and return a MyPlexUser for the account.

        Raises Unauthorized on bad credentials, BadRequest on any other
        non-201 response.
        """
        # Drop any stale token so the request authenticates with user/pass only.
        if 'X-Plex-Token' in plexapi.BASE_HEADERS:
            del plexapi.BASE_HEADERS['X-Plex-Token']
        auth = (username, password)
        log.info('POST %s', cls.SIGNIN)
        response = requests.post(cls.SIGNIN, headers=plexapi.BASE_HEADERS, auth=auth, timeout=TIMEOUT)
        # A successful sign-in answers 201 Created.
        if response.status_code != requests.codes.created:
            codename = codes.get(response.status_code)[0]
            if response.status_code == 401:
                raise Unauthorized('(%s) %s' % (response.status_code, codename))
            raise BadRequest('(%s) %s' % (response.status_code, codename))
        data = ElementTree.fromstring(response.text.encode('utf8'))
        return cls(data)
class MyPlexAccount(object):
    """ Represents myPlex account if you already have a connection to a server. """

    def __init__(self, server, data):
        """Build the account from a server's myplex account XML element.

        NOTE(review): *server* is accepted but never stored — presumably kept
        for interface symmetry with other plexapi constructors.
        """
        self.authToken = data.attrib.get('authToken')
        self.username = data.attrib.get('username')
        self.mappingState = data.attrib.get('mappingState')
        self.mappingError = data.attrib.get('mappingError')
        self.mappingErrorMessage = data.attrib.get('mappingErrorMessage')
        self.signInState = data.attrib.get('signInState')
        self.publicAddress = data.attrib.get('publicAddress')
        self.publicPort = data.attrib.get('publicPort')
        self.privateAddress = data.attrib.get('privateAddress')
        self.privatePort = data.attrib.get('privatePort')
        self.subscriptionFeatures = data.attrib.get('subscriptionFeatures')
        self.subscriptionActive = data.attrib.get('subscriptionActive')
        self.subscriptionState = data.attrib.get('subscriptionState')

    def resources(self):
        """Return the MyPlexResource objects registered to this account."""
        return MyPlexResource.fetch_resources(self.authToken)

    def getResource(self, search, port=32400):
        """ Searches resources by server.name (case-insensitive exact match)
            from the list of those available for this PlexAccount.
        """
        return _findResource(self.resources(), search, port)
class MyPlexResource(object):
    """A server resource advertised by plex.tv for an account token."""
    RESOURCES = 'https://plex.tv/api/resources?includeHttps=1'
    # (use_ssl, ResourceConnection attribute) pairs probed by connect().
    SSLTESTS = [(True, 'uri'), (False, 'http_uri')]

    def __init__(self, data):
        """Build the resource from the attributes of a plex.tv XML element."""
        self.name = data.attrib.get('name')
        self.accessToken = data.attrib.get('accessToken')
        self.product = data.attrib.get('product')
        self.productVersion = data.attrib.get('productVersion')
        self.platform = data.attrib.get('platform')
        self.platformVersion = data.attrib.get('platformVersion')
        self.device = data.attrib.get('device')
        self.clientIdentifier = data.attrib.get('clientIdentifier')
        self.createdAt = toDatetime(data.attrib.get('createdAt'))
        self.lastSeenAt = toDatetime(data.attrib.get('lastSeenAt'))
        self.provides = data.attrib.get('provides')
        self.owned = cast(bool, data.attrib.get('owned'))
        self.home = cast(bool, data.attrib.get('home'))
        self.synced = cast(bool, data.attrib.get('synced'))
        self.presence = cast(bool, data.attrib.get('presence'))
        self.connections = [ResourceConnection(elem) for elem in data if elem.tag == 'Connection']

    def __repr__(self):
        return '<%s:%s>' % (self.__class__.__name__, self.name.encode('utf8'))

    def connect(self, ssl=None):
        """Try all known connections (in parallel) and return the first
        PlexServer that responds; raises NotFound if none do.

        ssl=True limits attempts to https URIs, ssl=False to http, None tries both.
        """
        # Only check non-local connections unless we own the resource
        connections = sorted(self.connections, key=lambda c: c.local, reverse=True)
        if not self.owned:
            connections = [c for c in connections if c.local is False]
        # Try connecting to all known resource connections in parellel, but
        # only return the first server (in order) that provides a response.
        threads, results = [], []
        for testssl, attr in self.SSLTESTS:
            if ssl in [None, testssl]:
                for i in range(len(connections)):
                    uri = getattr(connections[i], attr)
                    args = (uri, results, len(results))
                    results.append(None)  # placeholder filled in by _connect
                    threads.append(Thread(target=self._connect, args=args))
                    threads[-1].start()
        for thread in threads:
            thread.join()
        # At this point we have a list of result tuples containing (uri, PlexServer)
        # or (uri, None) in the case a connection could not be established.
        for uri, result in results:
            log.info('Testing connection: %s %s', uri, 'OK' if result else 'ERR')
        results = list(filter(None, [r[1] for r in results if r]))
        if not results:
            raise NotFound('Unable to connect to resource: %s' % self.name)
        log.info('Connecting to server: %s', results[0])
        return results[0]

    def _connect(self, uri, results, i):
        """Thread worker: store (uri, PlexServer-or-None) at results[i]."""
        try:
            from plexapi.server import PlexServer
            results[i] = (uri, PlexServer(uri, self.accessToken))
        except NotFound:
            results[i] = (uri, None)

    @classmethod
    def fetch_resources(cls, token):
        """Return all MyPlexResource objects visible to *token*."""
        # Copy the shared base headers: mutating plexapi.BASE_HEADERS in place
        # leaked this token into every later request made by the module (bug).
        headers = dict(plexapi.BASE_HEADERS)
        headers['X-Plex-Token'] = token
        log.info('GET %s?X-Plex-Token=%s', cls.RESOURCES, token)
        response = requests.get(cls.RESOURCES, headers=headers, timeout=TIMEOUT)
        data = ElementTree.fromstring(response.text.encode('utf8'))
        return [MyPlexResource(elem) for elem in data]
class ResourceConnection(object):
    """A single <Connection> element under a plex.tv resource."""

    def __init__(self, data):
        self.protocol = data.attrib.get('protocol')
        self.address = data.attrib.get('address')
        self.port = cast(int, data.attrib.get('port'))
        # Fully-qualified (https) URI as advertised by plex.tv.
        self.uri = data.attrib.get('uri')
        self.local = cast(bool, data.attrib.get('local'))

    @property
    def http_uri(self):
        """Plain-http fallback URI built from address and port."""
        return 'http://%s:%s' % (self.address, self.port)

    def __repr__(self):
        return '<%s:%s>' % (self.__class__.__name__, self.uri.encode('utf8'))
def _findResource(resources, search, port=32400):
    """ Searches server.name only (case-insensitive exact match).

        NOTE(review): *port* is accepted for interface compatibility with the
        getResource() callers but is currently unused here.

        Raises NotFound when no resource name matches.
    """
    search = search.lower()
    log.info('Looking for server: %s', search)
    for server in resources:
        if search == server.name.lower():
            log.info('Server found: %s', server)
            return server
    log.info('Unable to find server: %s', search)
    raise NotFound('Unable to find server: %s' % search)
class MyPlexDevice(object):
    """A device (player/client) registered to a MyPlex account; exposes the
    remote-control command endpoints of the player."""
    DEVICES = 'https://plex.tv/devices.xml'

    def __init__(self, data):
        """Build the device from the attributes of a plex.tv <Device> element."""
        self.name = data.attrib.get('name')
        self.publicAddress = data.attrib.get('publicAddress')
        self.product = data.attrib.get('product')
        self.productVersion = data.attrib.get('productVersion')
        self.platform = data.attrib.get('platform')
        self.platformVersion = data.attrib.get('platformVersion')
        self.device = data.attrib.get('device')
        self.model = data.attrib.get('model')
        self.vendor = data.attrib.get('vendor')
        self.provides = data.attrib.get('provides').split(',')
        self.clientIdentifier = data.attrib.get('clientIdentifier')
        self.version = data.attrib.get('version')
        self.id = data.attrib.get('id')
        self.token = data.attrib.get('token')
        self.screenResolution = data.attrib.get('screenResolution')
        self.screenDensity = data.attrib.get('screenDensity')
        self.connectionsUris = [connection.attrib.get('uri') for connection in data.iter('Connection')]

    def __repr__(self):
        return '<%s:%s:%s>' % (self.__class__.__name__, self.name.encode('utf8'), self.product.encode('utf8'))

    @property
    def isReachable(self):
        # Truthy when at least one connection URI is known.
        return len(self.connectionsUris)

    @property
    def baseUrl(self):
        """First known connection URI; raises if the device is unreachable."""
        if not self.isReachable:
            raise Exception('This device is not reachable')
        return self.connectionsUris[0]

    @classmethod
    def fetch_resources(cls, token):
        """Return all MyPlexDevice objects visible to *token*."""
        # Copy the shared base headers: mutating plexapi.BASE_HEADERS in place
        # leaked this token into every later request made by the module (bug).
        headers = dict(plexapi.BASE_HEADERS)
        headers['X-Plex-Token'] = token
        log.info('GET %s?X-Plex-Token=%s', cls.DEVICES, token)
        response = requests.get(cls.DEVICES, headers=headers, timeout=TIMEOUT)
        data = ElementTree.fromstring(response.text.encode('utf8'))
        return [MyPlexDevice(elem) for elem in data]

    def sendCommand(self, command, args=None):
        """GET a /player command on the device; return parsed XML or None.

        Raises BadRequest on any non-200 response.
        """
        url = '%s%s' % (self.url(command), utils.joinArgs(args))
        log.info('GET %s', url)
        headers = dict(plexapi.BASE_HEADERS)  # copy; do not mutate the shared dict
        headers['X-Plex-Target-Client-Identifier'] = self.clientIdentifier
        response = requests.get(url, headers=headers, timeout=TIMEOUT)
        if response.status_code != requests.codes.ok:
            codename = codes.get(response.status_code)[0]
            raise BadRequest('(%s) %s' % (response.status_code, codename))
        data = response.text.encode('utf8')
        if data:
            try:
                return ElementTree.fromstring(data)
            except ElementTree.ParseError:
                # Some commands answer with an empty/non-XML body; was a bare
                # 'except:' which also swallowed KeyboardInterrupt et al.
                return None
        return None

    def url(self, path):
        """Join *path* onto the device's /player endpoint."""
        return '%s/player/%s' % (self.baseUrl, path.lstrip('/'))

    # Navigation Commands
    def moveUp(self, args=None): self.sendCommand('navigation/moveUp', args)  # noqa
    def moveDown(self, args=None): self.sendCommand('navigation/moveDown', args)  # noqa
    def moveLeft(self, args=None): self.sendCommand('navigation/moveLeft', args)  # noqa
    def moveRight(self, args=None): self.sendCommand('navigation/moveRight', args)  # noqa
    def pageUp(self, args=None): self.sendCommand('navigation/pageUp', args)  # noqa
    def pageDown(self, args=None): self.sendCommand('navigation/pageDown', args)  # noqa
    def nextLetter(self, args=None): self.sendCommand('navigation/nextLetter', args)  # noqa
    def previousLetter(self, args=None): self.sendCommand('navigation/previousLetter', args)  # noqa
    def select(self, args=None): self.sendCommand('navigation/select', args)  # noqa
    def back(self, args=None): self.sendCommand('navigation/back', args)  # noqa
    def contextMenu(self, args=None): self.sendCommand('navigation/contextMenu', args)  # noqa
    def toggleOSD(self, args=None): self.sendCommand('navigation/toggleOSD', args)  # noqa

    # Playback Commands
    def play(self, args=None): self.sendCommand('playback/play', args)  # noqa
    def pause(self, args=None): self.sendCommand('playback/pause', args)  # noqa
    def stop(self, args=None): self.sendCommand('playback/stop', args)  # noqa
    def stepForward(self, args=None): self.sendCommand('playback/stepForward', args)  # noqa
    def bigStepForward(self, args=None): self.sendCommand('playback/bigStepForward', args)  # noqa
    def stepBack(self, args=None): self.sendCommand('playback/stepBack', args)  # noqa
    def bigStepBack(self, args=None): self.sendCommand('playback/bigStepBack', args)  # noqa
    def skipNext(self, args=None): self.sendCommand('playback/skipNext', args)  # noqa
    def skipPrevious(self, args=None): self.sendCommand('playback/skipPrevious', args)  # noqa
|
|
"""Simplified retina model."""
import logging
import numpy as np
import cv2
import cv2.cv as cv
from collections import OrderedDict
from lumos.context import Context
from lumos.input import Projector, run
from ..photoreceptor import Rod, Cone
class Retina(object):
    """
    A multi-layered surface for hosting different types of neurons that make up a retina, simplified version.
    [Deprecated] Use VisualSystem instead.
    """

    # Default processing resolution; square, so the (width, height) vs
    # (height, width) ambiguity below is harmless for the default.
    default_image_size = (480, 480)

    def __init__(self, imageSize=default_image_size, timeNow=0.0):
        """Allocate all per-layer response images and convolution kernels.

        imageSize: 2-tuple; indexed as [1], [0] when building numpy shapes
                   (assumes (width, height) ordering — TODO confirm with callers).
        timeNow:   initial simulation timestamp.
        """
        # * Initialize members, parameters
        self.context = Context.getInstance()
        self.logger = logging.getLogger(__name__)
        self.logger.debug("Creating simplified Retina")  # to distinguish from other Retina versions
        self.imageSize = imageSize
        self.imageCenter = (self.imageSize[1] / 2, self.imageSize[0] / 2)
        self.timeNow = timeNow
        self.bounds = np.float32([[0.0, 0.0, 2.0], [self.imageSize[0] - 1, self.imageSize[1] - 1, 4.0]])
        self.center = (self.bounds[0] + self.bounds[1]) / 2
        self.logger.debug("Retina center: {}, image size: {}".format(self.center, self.imageSize))
        self.bipolarBlurSize = (5, 5)  # size of blurring kernel used when computing Bipolar cell response
        # Center-surround receptive field: positive center, negative surround.
        self.ganglionCenterSurroundKernel = np.float32(
            [ [ -1, -1, -1, -1, -1, -1, -1 ],
              [ -1, -1, -1, -1, -1, -1, -1 ],
              [ -1, -1,  7,  7,  7, -1, -1 ],
              [ -1, -1,  7,  9,  7, -1, -1 ],
              [ -1, -1,  7,  7,  7, -1, -1 ],
              [ -1, -1, -1, -1, -1, -1, -1 ],
              [ -1, -1, -1, -1, -1, -1, -1 ] ])
        self.ganglionCenterSurroundKernel /= np.sum(self.ganglionCenterSurroundKernel)  # normalize
        #self.logger.info("Ganglion center-surround kernel:\n{}".format(self.ganglionCenterSurroundKernel))  # [debug]
        # Build a pyramid of progressively larger (2x) kernels, each re-normalized.
        self.ganglionKernelLevels = 4
        self.ganglionKernels = [None] * self.ganglionKernelLevels
        self.ganglionKernels[0] = self.ganglionCenterSurroundKernel
        for i in xrange(1, self.ganglionKernelLevels):
            self.ganglionKernels[i] = cv2.resize(self.ganglionKernels[i - 1], dsize=None, fx=2, fy=2)
            self.ganglionKernels[i] /= np.sum(self.ganglionKernels[i])  # normalize
        #self.logger.info("Ganglion center-surround kernel sizes ({} levels): {}".format(self.ganglionKernelLevels, ", ".join("{}".format(k.shape) for k in self.ganglionKernels)))  # [debug]
        # * Image and related members
        self.imageCenter = (self.imageSize[1] / 2, self.imageSize[0] / 2)  # NOTE(review): duplicate of the assignment above
        self.imageShapeC3 = (self.imageSize[1], self.imageSize[0], 3)  # numpy shape for 3 channel images
        self.imageShapeC1 = (self.imageSize[1], self.imageSize[0])  # numpy shape for single channel images
        # NOTE Image shapes (h, w, 1) and (h, w) are not compatible unless we use keepdims=True for numpy operations
        self.imageTypeInt = np.uint8  # numpy dtype for integer-valued images
        self.imageTypeFloat = np.float32  # numpy dtype for real-valued images
        self.images = OrderedDict()
        # ** RGB and HSV images
        self.images['BGR'] = np.zeros(self.imageShapeC3, dtype=self.imageTypeInt)
        self.images['HSV'] = np.zeros(self.imageShapeC3, dtype=self.imageTypeInt)
        self.images['H'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt)
        self.images['S'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt)
        self.images['V'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt)
        # ** Freq/hue-dependent response images for rods and different cone types
        self.imageRod = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        self.imagesCone = dict()  # NOTE dict keys must match names of Cone.cone_types
        self.imagesCone['S'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        self.imagesCone['M'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        self.imagesCone['L'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        # ** Bipolar and Ganglion cell response images
        # TODO Add more Ganglion cell types with different receptive field properties (color-opponent cells)
        #   'RG' +Red    -Green
        #   'GR' +Green  -Red
        #   'RB' +Red    -Blue
        #   'BR' +Blue   -Red
        #   'BY' +Blue   -Yellow
        #   'YB' +Yellow -Blue
        #   'WK' +White  -Black (currently 'ON')
        #   'KW' +Black  -White (currently 'OFF')
        # NOTE: R = L cones, G = M cones, B = S cones
        self.imagesBipolar = dict()
        self.imagesBipolar['ON'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        self.imagesBipolar['OFF'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        self.imagesGanglion = dict()
        self.imagesGanglion['ON'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        self.imagesGanglion['OFF'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        # TODO Verify why image shapes (h, w, 1) and (h, w) are not compatible (use keepdims=True for numpy operations)
        self.imagesGanglion['RG'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        self.imagesGanglion['GR'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        self.imagesGanglion['RB'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        self.imagesGanglion['BR'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        self.imagesGanglion['BY'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        self.imagesGanglion['YB'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        # ** Combined response (salience) image
        self.imageSalience = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        # ** Spatial attention map with a central (covert) spotlight (currently unused; TODO move to VisualCortex? also, use np.ogrid?)
        self.imageAttention = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
        cv2.circle(self.imageAttention, (self.imageSize[1] / 2, self.imageSize[0] / 2), self.imageSize[0] / 3, 1.0, cv.CV_FILLED)
        self.imageAttention = cv2.blur(self.imageAttention, (self.imageSize[0] / 4, self.imageSize[0] / 4))  # coarse blur
        # ** Output image(s)
        if self.context.options.gui:
            self.imageOut = np.zeros(self.imageShapeC3, dtype=self.imageTypeInt)

    def initialize(self, imageIn, timeNow):
        """No-op; present only to match the FrameProcessor interface."""
        pass  # to emulate FrameProcessor-like interface

    def process(self, imageIn, timeNow):
        """Run one frame through the retina pipeline.

        Returns (True, salience image). The per-layer response images are
        updated in place as a side effect.
        """
        self.timeNow = timeNow
        self.logger.debug("Retina update @ {}".format(self.timeNow))
        # * Get HSV
        self.images['BGR'][:] = imageIn
        self.images['HSV'] = cv2.cvtColor(self.images['BGR'], cv2.COLOR_BGR2HSV)
        self.images['H'], self.images['S'], self.images['V'] = cv2.split(self.images['HSV'])
        # * Compute Rod and Cone responses
        # TODO Need non-linear response to hue, sat, val (less dependent on sat, val for cones)
        self.imageRod = np.float32(180 - cv2.absdiff(self.images['H'], Rod.rod_type.hue) % 180) * 255 * self.images['V'] * Rod.rod_type.responseFactor  # hack: use constant sat = 200 to make response independent of saturation
        self.imagesCone['S'] = np.float32(180 - cv2.absdiff(self.images['H'], Cone.cone_types[0].hue) % 180) * self.images['S'] * self.images['V'] * Cone.cone_types[0].responseFactor
        self.imagesCone['M'] = np.float32(180 - cv2.absdiff(self.images['H'], Cone.cone_types[1].hue) % 180) * self.images['S'] * self.images['V'] * Cone.cone_types[1].responseFactor
        self.imagesCone['L'] = np.float32(180 - cv2.absdiff(self.images['H'], Cone.cone_types[2].hue) % 180) * self.images['S'] * self.images['V'] * Cone.cone_types[2].responseFactor
        # * Compute Bipolar and Ganglion cell responses
        # ** Blurring is a step that is effectively achieved in biology by horizontal cells
        imageRodBlurred = cv2.blur(self.imageRod, self.bipolarBlurSize)
        self.imagesBipolar['ON'] = np.clip(self.imageRod - 0.75 * imageRodBlurred, 0.0, 1.0)
        self.imagesBipolar['OFF'] = np.clip((1.0 - self.imageRod) - 0.75 * (1.0 - imageRodBlurred), 0.0, 1.0)  # same as (1 - ON response)?
        #imagesConeSBlurred = cv2.blur(self.imagesCone['S'], self.bipolarBlurSize)
        #imagesConeMBlurred = cv2.blur(self.imagesCone['M'], self.bipolarBlurSize)
        #imagesConeLBlurred = cv2.blur(self.imagesCone['L'], self.bipolarBlurSize)
        # ** Ganglion cells simply add up responses from a (bunch of) central bipolar cell(s) (ON/OFF) and surrounding antagonistic bipolar cells (OFF/ON)
        # *** Method 1: Center - Surround
        #imageGanglionCenterON = cv2.filter2D(self.imagesBipolar['ON'], -1, self.ganglionCenterKernel)
        #imageGanglionSurroundOFF = cv2.filter2D(self.imagesBipolar['OFF'], -1, self.ganglionSurroundKernel)
        #self.imagesGanglion['ON'] = 0.75 * imageGanglionCenterON + 0.25 * imageGanglionSurroundOFF
        # *** Method 2: Center-Surround kernel
        #self.imagesGanglion['ON'] = np.clip(cv2.filter2D(self.imagesBipolar['ON'], -1, self.ganglionCenterSurroundKernel), 0.0, 1.0)
        #self.imagesGanglion['OFF'] = np.clip(cv2.filter2D(self.imagesBipolar['OFF'], -1, self.ganglionCenterSurroundKernel), 0.0, 1.0)
        # *** Method 3: Multi-level Center-Surround kernels, taking maximum
        # Reset accumulators, then take the per-pixel max response over kernel scales.
        self.imagesGanglion['ON'].fill(0.0)
        self.imagesGanglion['OFF'].fill(0.0)
        self.imagesGanglion['RG'].fill(0.0)
        self.imagesGanglion['GR'].fill(0.0)
        self.imagesGanglion['RB'].fill(0.0)
        self.imagesGanglion['BR'].fill(0.0)
        self.imagesGanglion['BY'].fill(0.0)
        self.imagesGanglion['YB'].fill(0.0)
        for k in self.ganglionKernels:
            # Rod pathway
            self.imagesGanglion['ON'] = np.maximum(self.imagesGanglion['ON'], np.clip(cv2.filter2D(self.imagesBipolar['ON'], -1, k), 0.0, 1.0))
            self.imagesGanglion['OFF'] = np.maximum(self.imagesGanglion['OFF'], np.clip(cv2.filter2D(self.imagesBipolar['OFF'], -1, k), 0.0, 1.0))
            # Cone pathway (color-opponent signals; NOTE(review): these three are
            # loop-invariant and could be hoisted out of the kernel loop)
            imageRG = self.imagesCone['L'] - self.imagesCone['M']
            imageRB = self.imagesCone['L'] - self.imagesCone['S']
            imageBY = self.imagesCone['S'] - (self.imagesCone['L'] + self.imagesCone['M']) / 2
            self.imagesGanglion['RG'] = np.maximum(self.imagesGanglion['RG'], np.clip(cv2.filter2D(imageRG, -1, k), 0.0, 1.0))
            self.imagesGanglion['GR'] = np.maximum(self.imagesGanglion['GR'], np.clip(cv2.filter2D(-imageRG, -1, k), 0.0, 1.0))
            self.imagesGanglion['RB'] = np.maximum(self.imagesGanglion['RB'], np.clip(cv2.filter2D(imageRB, -1, k), 0.0, 1.0))
            self.imagesGanglion['BR'] = np.maximum(self.imagesGanglion['BR'], np.clip(cv2.filter2D(-imageRB, -1, k), 0.0, 1.0))
            self.imagesGanglion['BY'] = np.maximum(self.imagesGanglion['BY'], np.clip(cv2.filter2D(imageBY, -1, k), 0.0, 1.0))
            self.imagesGanglion['YB'] = np.maximum(self.imagesGanglion['YB'], np.clip(cv2.filter2D(-imageBY, -1, k), 0.0, 1.0))
        # * Compute combined (salience) image; TODO incorporate attention weighting (spatial, as well as by visual feature)
        # ** Method 1: Max of all Ganglion cell images
        self.imageSalience.fill(0.0)
        for ganglionType, ganglionImage in self.imagesGanglion.iteritems():
            self.imageSalience = np.maximum(self.imageSalience, ganglionImage)
        #self.imageSalience *= self.imageAttention  # TODO evaluate if this is necessary
        # * TODO Compute feature vector of attended region
        # * Show output images if in GUI mode
        if self.context.options.gui:
            #cv2.imshow("Hue", self.images['H'])
            #cv2.imshow("Saturation", self.images['S'])
            #cv2.imshow("Value", self.images['V'])
            cv2.imshow("Rod response", self.imageRod)
            cv2.imshow("S-cone response", self.imagesCone['S'])
            cv2.imshow("M-cone response", self.imagesCone['M'])
            cv2.imshow("L-cone response", self.imagesCone['L'])
            cv2.imshow("ON Bipolar cells", self.imagesBipolar['ON'])
            cv2.imshow("OFF Bipolar cells", self.imagesBipolar['OFF'])
            #cv2.imshow("ON Ganglion cells", self.imagesGanglion['ON'])
            #cv2.imshow("OFF Ganglion cells", self.imagesGanglion['OFF'])
            for ganglionType, ganglionImage in self.imagesGanglion.iteritems():
                cv2.imshow("{} Ganglion cells".format(ganglionType), ganglionImage)
            cv2.imshow("Salience", self.imageSalience)
            # Designate a representative output image
            self.imageOut = self.imageSalience
            #_, self.imageOut = cv2.threshold(self.imageOut, 0.15, 1.0, cv2.THRESH_TOZERO)  # apply threshold to remove low-response regions
        return True, self.imageOut
if __name__ == "__main__":
    # Manual test entry point: stream input frames through a simplified Retina.
    Context.createInstance(description="Test application that uses a SimplifiedProjector to run image input through a (simplified) Retina.")
    run(Projector(Retina()))
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Abiquo Test Suite
"""
import sys
from libcloud.utils.py3 import ET
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.abiquo import AbiquoNodeDriver
from libcloud.common.abiquo import ForbiddenError, get_href
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.compute.base import NodeLocation, NodeImage
from libcloud.test.compute import TestCaseMixin
from libcloud.test import MockHttp, unittest
from libcloud.test.file_fixtures import ComputeFileFixtures
class AbiquoNodeDriverTest(TestCaseMixin):
"""
Abiquo Node Driver test suite
"""
    @classmethod
    def setUpClass(cls):
        """
        Set up the driver with the main user
        """
        # Route all HTTP traffic through the mock so no real API is contacted.
        AbiquoNodeDriver.connectionCls.conn_class = AbiquoMockHttp
        cls.driver = AbiquoNodeDriver('son', 'goku',
                                      'http://dummy.host.com/api')
    def test_unauthorized_controlled(self):
        """
        Test the Unauthorized Exception is Controlled.

        Test, through the 'login' method, that a '401 Unauthorized'
        raises a 'InvalidCredsError' instead of the 'MalformedUrlException'
        """
        # 'son'/'goten' triggers a 401 in the mock HTTP layer.
        self.assertRaises(InvalidCredsError, AbiquoNodeDriver, 'son',
                          'goten', 'http://dummy.host.com/api')
    def test_forbidden_controlled(self):
        """
        Test the Forbidden Exception is Controlled.

        Test, through the 'list_images' method, that a '403 Forbidden'
        raises a 'ForbiddenError' instead of the 'MalformedUrlException'
        """
        # 'son'/'gohan' triggers a 403 in the mock HTTP layer.
        AbiquoNodeDriver.connectionCls.conn_class = AbiquoMockHttp
        conn = AbiquoNodeDriver('son', 'gohan', 'http://dummy.host.com/api')
        self.assertRaises(ForbiddenError, conn.list_images)
    def test_handle_other_errors_such_as_not_found(self):
        """
        Test common 'logical' exceptions are controlled.

        Test that common exception (normally 404-Not Found and 409-Conflict),
        that return an XMLResponse with the explanation of the errors are
        controlled.
        """
        # 'go'/'trunks' makes the mock answer with an XML error response.
        self.driver = AbiquoNodeDriver('go', 'trunks',
                                       'http://dummy.host.com/api')
        self.assertRaises(LibcloudError, self.driver.list_images)
    def test_ex_create_and_delete_empty_group(self):
        """
        Test the creation and deletion of an empty group.
        """
        # Both calls must succeed without raising against the mock backend.
        group = self.driver.ex_create_group('libcloud_test_group')
        group.destroy()
    def test_create_node_no_image_raise_exception(self):
        """
        Test 'create_node' without image.

        Test the 'create_node' function without 'image' parameter raises
        an Exception
        """
        self.assertRaises(LibcloudError, self.driver.create_node)
    def test_list_locations_response(self):
        """Override of TestCaseMixin: locations listing must return a list."""
        # Skip silently when the driver declares it does not list locations.
        if not self.should_list_locations:
            return None
        locations = self.driver.list_locations()
        self.assertTrue(isinstance(locations, list))
    def test_create_node_specify_location(self):
        """
        Test you can create a node specifying the location.
        """
        image = self.driver.list_images()[0]
        location = self.driver.list_locations()[0]
        self.driver.create_node(image=image, location=location)
    def test_create_node_specify_wrong_location(self):
        """
        Test you can not create a node with wrong location.
        """
        image = self.driver.list_images()[0]
        # Id 435 does not exist in the mock backend.
        location = NodeLocation(435, 'fake-location', 'Spain', self.driver)
        self.assertRaises(LibcloudError, self.driver.create_node, image=image,
                          location=location)
    def test_create_node_specify_wrong_image(self):
        """
        Test image compatibility.

        Some locations only can handle a group of images, not all of them.
        Test you can not create a node with incompatible image-location.
        """
        # Create fake NodeImage
        image = NodeImage(3234, 'dummy-image', self.driver)
        location = self.driver.list_locations()[0]
        # With this image, it should raise an Exception
        self.assertRaises(LibcloudError, self.driver.create_node, image=image,
                          location=location)
    def test_create_node_specify_group_name(self):
        """
        Test 'create_node' into a concrete group.
        """
        image = self.driver.list_images()[0]
        self.driver.create_node(image=image, group_name='new_group_name')
    def test_create_group_location_does_not_exist(self):
        """
        Test 'ex_create_group' with a nonexistent location.

        Defines a 'fake' location and tries to create a group in it.
        """
        location = NodeLocation(435, 'fake-location', 'Spain', self.driver)
        # With this location, it should raise an Exception
        self.assertRaises(LibcloudError, self.driver.ex_create_group,
                          name='new_group_name',
                          location=location)
def test_destroy_node_response(self):
"""
'destroy_node' basic test.
Override the destroy to return a different node available
to be undeployed. (by default it returns an already undeployed node,
for test creation).
"""
self.driver = AbiquoNodeDriver('go', 'trunks',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
def test_destroy_node_response_failed(self):
"""
'destroy_node' asynchronous error.
Test that the driver handles correctly when, for some reason,
the 'destroy' job fails.
"""
self.driver = AbiquoNodeDriver('muten', 'roshi',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertFalse(ret)
def test_destroy_node_allocation_state(self):
"""
Test the 'destroy_node' invalid state.
Try to destroy a node when the node is not running.
"""
self.driver = AbiquoNodeDriver('ve', 'geta',
'http://dummy.host.com/api')
# Override the destroy to return a different node available to be
# undeployed
node = self.driver.list_nodes()[0]
# The mock class with the user:password 've:geta' returns a node that
# is in 'ALLOCATION' state and hence, the 'destroy_node' method should
# raise a LibcloudError
self.assertRaises(LibcloudError, self.driver.destroy_node, node)
def test_destroy_not_deployed_group(self):
"""
Test 'ex_destroy_group' when group is not deployed.
"""
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[1]
self.assertTrue(group.destroy())
def test_destroy_deployed_group(self):
"""
Test 'ex_destroy_group' when there are machines running.
"""
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[0]
self.assertTrue(group.destroy())
def test_destroy_deployed_group_failed(self):
"""
Test 'ex_destroy_group' fails.
Test driver handles correctly when, for some reason, the
asynchronous job fails.
"""
self.driver = AbiquoNodeDriver('muten', 'roshi',
'http://dummy.host.com/api')
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[0]
self.assertFalse(group.destroy())
def test_destroy_group_invalid_state(self):
"""
Test 'ex_destroy_group' invalid state.
Test the Driver raises an exception when the group is in
invalid temporal state.
"""
self.driver = AbiquoNodeDriver('ve', 'geta',
'http://dummy.host.com/api')
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[1]
self.assertRaises(LibcloudError, group.destroy)
def test_run_node(self):
"""
Test 'ex_run_node' feature.
"""
node = self.driver.list_nodes()[0]
# Node is by default in NodeState.TERMINATED and AbiquoState ==
# 'NOT_ALLOCATED'
# so it is available to be runned
self.driver.ex_run_node(node)
def test_run_node_invalid_state(self):
"""
Test 'ex_run_node' invalid state.
Test the Driver raises an exception when try to run a
node that is in invalid state to run.
"""
self.driver = AbiquoNodeDriver('go', 'trunks',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
# Node is by default in AbiquoState = 'ON' for user 'go:trunks'
# so is not available to be runned
self.assertRaises(LibcloudError, self.driver.ex_run_node, node)
def test_run_node_failed(self):
"""
Test 'ex_run_node' fails.
Test driver handles correctly when, for some reason, the
asynchronous job fails.
"""
self.driver = AbiquoNodeDriver('ten', 'shin',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
# Node is in the correct state, but it fails because of the
# async task and it raises the error.
self.assertRaises(LibcloudError, self.driver.ex_run_node, node)
    def test_get_href(self):
        # get_href() must strip the scheme/host/port and any path prefix
        # that precedes the '/api' segment, returning only the bare API
        # path for the link matching the requested 'rel' attribute.
        xml = '''
<datacenter>
        <link href="http://10.60.12.7:80/api/admin/datacenters/2"
        type="application/vnd.abiquo.datacenter+xml" rel="edit1"/>
        <link href="http://10.60.12.7:80/ponies/bar/foo/api/admin/datacenters/3"
        type="application/vnd.abiquo.datacenter+xml" rel="edit2"/>
        <link href="http://vdcbridge.interoute.com:80/jclouds/apiouds/api/admin/enterprises/1234"
        type="application/vnd.abiquo.datacenter+xml" rel="edit3"/>
</datacenter>
'''
        elem = ET.XML(xml)
        # Simple prefix before '/api'.
        href = get_href(element=elem, rel='edit1')
        self.assertEqual(href, '/admin/datacenters/2')
        # Multi-segment prefix before '/api'.
        href = get_href(element=elem, rel='edit2')
        self.assertEqual(href, '/admin/datacenters/3')
        # Host path that itself contains 'api'-like segments.
        href = get_href(element=elem, rel='edit3')
        self.assertEqual(href, '/admin/enterprises/1234')
class AbiquoMockHttp(MockHttp):
    """
    Mock the functionality of the remote Abiquo API.

    Each ``_api_*`` method handles one URL of the fake Abiquo endpoint.
    Different HTTP Basic credentials select different canned scenarios:
    'son:goten' -> bad login, 've:geta' -> invalid node/group states,
    'muten:roshi' -> failed async tasks, 'ten:shin' -> failed deploys,
    'go:trunks' -> deployed node / missing repository.
    """
    fixtures = ComputeFileFixtures('abiquo')
    fixture_tag = 'default'

    def _api_login(self, method, url, body, headers):
        # 'Basic c29uOmdvdGVu' is 'son:goten': simulate a failed login.
        if headers['Authorization'] == 'Basic c29uOmdvdGVu':
            expected_response = self.fixtures.load('unauthorized_user.html')
            expected_status = httplib.UNAUTHORIZED
        else:
            expected_response = self.fixtures.load('login.xml')
            expected_status = httplib.OK
        return (expected_status, expected_response, {}, '')

    def _api_cloud_virtualdatacenters(self, method, url, body, headers):
        return (httplib.OK, self.fixtures.load('vdcs.xml'), {}, '')

    def _api_cloud_virtualdatacenters_4(self, method, url, body, headers):
        return (httplib.OK, self.fixtures.load('vdc_4.xml'), {}, '')

    def _api_cloud_virtualdatacenters_4_virtualappliances(self, method, url, body, headers):
        if method == 'POST':
            vapp_name = ET.XML(body).findtext('name')
            if vapp_name == 'libcloud_test_group':
                # we come from 'test_ex_create_and_delete_empty_group(self):'
                # method and so, we return the 'ok' return
                response = self.fixtures.load('vdc_4_vapp_creation_ok.xml')
                return (httplib.OK, response, {}, '')
            elif vapp_name == 'new_group_name':
                # we come from 'test_ex_create_and_delete_empty_group(self):'
                # method and so, we return the 'ok' return
                response = self.fixtures.load('vdc_4_vapp_creation_ok.xml')
                return (httplib.OK, response, {}, '')
        else:
            # It will be a 'GET';
            return (httplib.OK, self.fixtures.load('vdc_4_vapps.xml'), {}, '')

    def _api_cloud_virtualdatacenters_4_virtualappliances_5(self, method, url, body, headers):
        if method == 'GET':
            # 'Basic dmU6Z2V0YQ==' is 've:geta'.
            if headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
                # Try to destroy a group with 'needs_sync' state
                response = self.fixtures.load('vdc_4_vapp_5_needs_sync.xml')
            else:
                # Try to destroy a group with 'undeployed' state
                response = self.fixtures.load('vdc_4_vapp_5.xml')
            return (httplib.OK, response, {}, '')
        else:
            # it will be a 'DELETE'
            return (httplib.NO_CONTENT, '', {}, '')

    def _api_cloud_virtualdatacenters_4_virtualappliances_6(self, method, url, body, headers):
        if method == 'GET':
            # deployed vapp
            response = self.fixtures.load('vdc_4_vapp_6.xml')
            return (httplib.OK, response, {}, '')
        else:
            # it will be a 'DELETE'
            return (httplib.NO_CONTENT, '', {}, '')

    def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_1da8c8b6_86f6_49ef_9d29_57dcc73b875a(self, method, url, body, headers):
        # 'Basic bXV0ZW46cm9zaGk=' is 'muten:roshi'.
        if headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk=':
            # User 'muten:roshi' failed task
            response = self.fixtures.load(
                'vdc_4_vapp_6_undeploy_task_failed.xml')
        else:
            response = self.fixtures.load('vdc_4_vapp_6_undeploy_task.xml')
        return (httplib.OK, response, {}, '')

    def _api_cloud_virtualdatacenters_4_virtualappliances_5_virtualmachines(
            self, method, url, body, headers):
        # This virtual app never have virtual machines
        if method == 'GET':
            response = self.fixtures.load('vdc_4_vapp_5_vms.xml')
            return (httplib.OK, response, {}, '')
        elif method == 'POST':
            # it must be a POST
            response = self.fixtures.load('vdc_4_vapp_6_vm_creation_ok.xml')
            return (httplib.CREATED, response, {}, '')

    def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines(
            self, method, url, body, headers):
        # Default-created virtual app virtual machines'
        if method == 'GET':
            if headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
                response = self.fixtures.load('vdc_4_vapp_6_vms_allocated.xml')
            else:
                response = self.fixtures.load('vdc_4_vapp_6_vms.xml')
            return (httplib.OK, response, {}, '')
        else:
            # it must be a POST
            response = self.fixtures.load('vdc_4_vapp_6_vm_creation_ok.xml')
            return (httplib.CREATED, response, {}, '')

    def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3(self, method, url, body, headers):
        # 'Basic Z286dHJ1bmtz' is 'go:trunks'.
        if (headers['Authorization'] == 'Basic Z286dHJ1bmtz' or
                headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk='):
            # Undeploy node
            response = self.fixtures.load("vdc_4_vapp_6_vm_3_deployed.xml")
        elif headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
            # Try to undeploy a node with 'allocation' state
            response = self.fixtures.load('vdc_4_vapp_6_vm_3_allocated.xml')
        else:
            # Get node
            response = self.fixtures.load('vdc_4_vapp_6_vm_3.xml')
        return (httplib.OK, response, {}, '')

    def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_deploy(self, method, url,
                                                                                            body, headers):
        response = self.fixtures.load('vdc_4_vapp_6_vm_3_deploy.xml')
        return (httplib.CREATED, response, {}, '')

    def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_b44fe278_6b0f_4dfb_be81_7c03006a93cb(self, method, url, body, headers):
        # 'Basic dGVuOnNoaW4=' is 'ten:shin'.
        if headers['Authorization'] == 'Basic dGVuOnNoaW4=':
            # User 'ten:shin' failed task
            response = self.fixtures.load(
                'vdc_4_vapp_6_vm_3_deploy_task_failed.xml')
        else:
            response = self.fixtures.load('vdc_4_vapp_6_vm_3_deploy_task.xml')
        return (httplib.OK, response, {}, '')

    def _api_cloud_virtualdatacenters_4_virtualappliances_6_action_undeploy(
            self, method, url, body, headers):
        response = self.fixtures.load('vdc_4_vapp_6_undeploy.xml')
        return (httplib.OK, response, {}, '')

    def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_reset(self, method,
                                                                                           url, body, headers):
        response = self.fixtures.load('vdc_4_vapp_6_vm_3_reset.xml')
        return (httplib.CREATED, response, {}, '')

    def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_a8c9818e_f389_45b7_be2c_3db3a9689940(self, method, url, body, headers):
        if headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk=':
            # User 'muten:roshi' failed task
            response = self.fixtures.load(
                'vdc_4_vapp_6_undeploy_task_failed.xml')
        else:
            response = self.fixtures.load('vdc_4_vapp_6_vm_3_reset_task.xml')
        return (httplib.OK, response, {}, '')

    def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_undeploy(self, method, url,
                                                                                              body, headers):
        response = self.fixtures.load('vdc_4_vapp_6_vm_3_undeploy.xml')
        return (httplib.CREATED, response, {}, '')

    def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_network_nics(self, method, url,
                                                                                           body, headers):
        response = self.fixtures.load('vdc_4_vapp_6_vm_3_nics.xml')
        return (httplib.OK, response, {}, '')

    def _api_admin_datacenters(self, method, url, body, headers):
        return (httplib.OK, self.fixtures.load('dcs.xml'), {}, '')

    def _api_admin_enterprises_1(self, method, url, body, headers):
        return (httplib.OK, self.fixtures.load('ent_1.xml'), {}, '')

    def _api_admin_enterprises_1_datacenterrepositories(self, method, url, body, headers):
        # When the user is the common one for all the tests ('son, 'goku')
        # it creates this basic auth and we return the datacenters value
        if headers['Authorization'] == 'Basic Z286dHJ1bmtz':
            expected_response = self.fixtures.load("not_found_error.xml")
            return (httplib.NOT_FOUND, expected_response, {}, '')
        elif headers['Authorization'] != 'Basic c29uOmdvaGFu':
            return (httplib.OK, self.fixtures.load('ent_1_dcreps.xml'), {}, '')
        else:
            # son:gohan user: forbidden error
            expected_response = self.fixtures.load("privilege_errors.html")
            return (httplib.FORBIDDEN, expected_response, {}, '')

    def _api_admin_enterprises_1_datacenterrepositories_2(self, method, url, body, headers):
        return (httplib.OK, self.fixtures.load('ent_1_dcrep_2.xml'), {}, '')

    def _api_admin_enterprises_1_datacenterrepositories_2_virtualmachinetemplates(self, method, url, body, headers):
        return (httplib.OK, self.fixtures.load('ent_1_dcrep_2_templates.xml'),
                {}, '')

    def _api_admin_enterprises_1_datacenterrepositories_2_virtualmachinetemplates_11(self, method, url, body, headers):
        return (
            httplib.OK, self.fixtures.load('ent_1_dcrep_2_template_11.xml'),
            {}, '')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    sys.exit(unittest.main())
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
VMware vSphere driver supporting vSphere v5.5.
Note: This driver requires pysphere package
(https://pypi.python.org/pypi/pysphere) which can be installed using pip. For
more information, please refer to the official documentation.
"""
import os
import atexit
try:
import pysphere
pysphere
except ImportError:
raise ImportError('Missing "pysphere" dependency. You can install it '
'using pip - pip install pysphere')
from pysphere import VIServer
from pysphere.vi_task import VITask
from pysphere.vi_mor import VIMor, MORTypes
from pysphere.resources import VimService_services as VI
from pysphere.vi_virtual_machine import VIVirtualMachine
from libcloud.utils.decorators import wrap_non_libcloud_exceptions
from libcloud.common.base import ConnectionUserAndKey
from libcloud.common.types import LibcloudError
from libcloud.common.types import InvalidCredsError
from libcloud.compute.base import NodeDriver
from libcloud.compute.base import NodeLocation
from libcloud.compute.base import NodeImage
from libcloud.compute.base import Node
from libcloud.compute.types import NodeState, Provider
from libcloud.utils.networking import is_public_subnet
# Public API of this module.
__all__ = [
    'VSphereNodeDriver',
    'VSphere_5_5_NodeDriver'
]

# API version used when the caller does not request one explicitly.
DEFAULT_API_VERSION = '5.5'
DEFAULT_CONNECTION_TIMEOUT = 5  # default connection timeout in seconds
class VSphereConnection(ConnectionUserAndKey):
    """
    Connection to a vSphere endpoint, backed by a pysphere ``VIServer``.

    Exactly one of ``host`` or ``url`` must be supplied; the chosen value
    is stored on ``self.host_or_url`` and used by :meth:`connect`.
    """

    def __init__(self, user_id, key, secure=True,
                 host=None, port=None, url=None, timeout=None, **kwargs):
        if host and url:
            raise ValueError('host and url arguments are mutually exclusive')

        if host:
            host_or_url = host
        elif url:
            host_or_url = url
        else:
            raise ValueError('Either "host" or "url" argument must be '
                             'provided')

        self.host_or_url = host_or_url
        # Lazily populated pysphere VIServer instance (see connect()).
        self.client = None
        super(VSphereConnection, self).__init__(user_id=user_id,
                                                key=key, secure=secure,
                                                host=host, port=port,
                                                url=url, timeout=timeout,
                                                **kwargs)

    def connect(self):
        """
        Open the pysphere connection.

        :raises InvalidCredsError: on an ``InvalidLoginFault`` from the
            server.
        :raises LibcloudError: on any other connection failure.
        """
        self.client = VIServer()

        trace_file = os.environ.get('LIBCLOUD_DEBUG', None)

        try:
            self.client.connect(host=self.host_or_url, user=self.user_id,
                                password=self.key,
                                sock_timeout=DEFAULT_CONNECTION_TIMEOUT,
                                trace_file=trace_file)
        except Exception as e:
            # FIX: the original read ``e.message`` unconditionally, which
            # raises AttributeError for exceptions without that attribute
            # (all exceptions on Python 3). Prefer ``strerror``, then
            # ``message``, then the string form of the exception.
            message = getattr(e, 'strerror',
                              getattr(e, 'message', str(e)))

            fault = getattr(e, 'fault', None)

            if fault == 'InvalidLoginFault':
                raise InvalidCredsError(message)

            raise LibcloudError(value=message, driver=self.driver)

        # Make sure the session is torn down when the interpreter exits.
        atexit.register(self.disconnect)

    def disconnect(self):
        if not self.client:
            return

        try:
            self.client.disconnect()
        except Exception:
            # Ignore all the disconnect errors
            pass

    def run_client_method(self, method_name, **method_kwargs):
        """
        Invoke ``method_name`` on the underlying pysphere client.

        :raises AttributeError: if the client has no such method (the
            original code returned ``None`` from getattr and then failed
            with an opaque ``TypeError`` when calling it).
        """
        method = getattr(self.client, method_name)
        return method(**method_kwargs)
class VSphereNodeDriver(NodeDriver):
    """
    libcloud driver for VMware vSphere, implemented on top of pysphere.
    """
    name = 'VMware vSphere'
    website = 'http://www.vmware.com/products/vsphere/'
    type = Provider.VSPHERE
    connectionCls = VSphereConnection

    # Map pysphere power-state strings to libcloud NodeState values.
    NODE_STATE_MAP = {
        'POWERED ON': NodeState.RUNNING,
        'POWERED OFF': NodeState.STOPPED,
        'SUSPENDED': NodeState.SUSPENDED,
        'POWERING ON': NodeState.PENDING,
        'POWERING OFF': NodeState.PENDING,
        'SUSPENDING': NodeState.PENDING,
        'RESETTING': NodeState.PENDING,
        'BLOCKED ON MSG': NodeState.ERROR,
        'REVERTING TO SNAPSHOT': NodeState.PENDING
    }

    def __new__(cls, username, password, secure=True, host=None, port=None,
                url=None, api_version=DEFAULT_API_VERSION, **kwargs):
        # Dispatch to the subclass implementing the requested API version.
        if cls is VSphereNodeDriver:
            if api_version == '5.5':
                cls = VSphere_5_5_NodeDriver
            else:
                raise NotImplementedError('Unsupported API version: %s' %
                                          (api_version))

        return super(VSphereNodeDriver, cls).__new__(cls)

    def __init__(self, username, password, secure=True,
                 host=None, port=None, url=None, timeout=None):
        self.url = url
        # NOTE(review): ``timeout`` is accepted but not forwarded to the
        # base driver -- confirm whether it should be passed through.
        super(VSphereNodeDriver, self).__init__(key=username, secret=password,
                                                secure=secure, host=host,
                                                port=port, url=url)

    @wrap_non_libcloud_exceptions
    def list_locations(self):
        """
        List available locations.

        In vSphere case, a location represents a datacenter.
        """
        datacenters = self.connection.client.get_datacenters()

        locations = []
        for id, name in datacenters.items():
            location = NodeLocation(id=id, name=name, country=None,
                                    driver=self)
            locations.append(location)

        return locations

    @wrap_non_libcloud_exceptions
    def list_images(self):
        """
        List available images (templates).
        """
        server = self.connection.client

        names = ['name', 'config.uuid', 'config.template']
        properties = server._retrieve_properties_traversal(
            property_names=names,
            from_node=None,
            obj_type=MORTypes.VirtualMachine)

        images = []
        for prop in properties:
            id = None
            name = None
            is_template = False

            for item in prop.PropSet:
                if item.Name == 'config.uuid':
                    id = item.Val
                if item.Name == 'name':
                    name = item.Val
                elif item.Name == 'config.template':
                    is_template = item.Val

            # Only VMs flagged as templates are exposed as images.
            if is_template:
                image = NodeImage(id=id, name=name, driver=self)
                images.append(image)

        return images

    @wrap_non_libcloud_exceptions
    def list_nodes(self):
        vm_paths = self.connection.client.get_registered_vms()
        nodes = self._to_nodes(vm_paths=vm_paths)
        return nodes

    # FIX: the decorator was applied twice in the original; once is enough.
    @wrap_non_libcloud_exceptions
    def ex_clone_node(self, node, name, power_on=True, template=False):
        """
        Clone the provided node.

        :param node: Node to clone.
        :type node: :class:`libcloud.compute.base.Node`

        :param name: Name of the new node.
        :type name: ``str``

        :param power_on: Power the new node on after being created.
        :type power_on: ``bool``

        :param template: Specifies whether or not the new virtual machine
                         should be marked as a template.
        :type template: ``bool``

        :return: New node.
        :rtype: :class:`libcloud.compute.base.Node`
        """
        vm = self._get_vm_for_node(node=node)
        new_vm = vm.clone(name=name, power_on=power_on, template=template)
        new_node = self._to_node(vm=new_vm)

        return new_node

    @wrap_non_libcloud_exceptions
    def ex_migrate_node(self, node, resource_pool=None, host=None,
                        priority='default'):
        """
        Migrate provided node to a new host or resource pool.

        :param node: Node to clone.
        :type node: :class:`libcloud.compute.base.Node`

        :param resource_pool: ID of the target resource pool to migrate the
                              node into.
        :type resource_pool: ``str``

        :param host: Target host to migrate the host to.
        :type host: ``str``

        :param priority: Migration task priority. Possible values: default,
                         high, low.
        :type priority: ``str``

        :return: True on success.
        :rtype: ``bool``
        """
        vm = self._get_vm_for_node(node=node)
        vm.migrate(priority=priority, resource_pool=resource_pool, host=host)

        return True

    @wrap_non_libcloud_exceptions
    def reboot_node(self, node):
        vm = self._get_vm_for_node(node=node)
        vm.reset()

        return True

    @wrap_non_libcloud_exceptions
    def destroy_node(self, node, ex_remove_files=True):
        """
        Destroy the provided node.

        :param ex_remove_files: Remove all the files from the datastore.
        :type ex_remove_files: ``bool``
        """
        # FIX: the original unconditionally reassigned
        # ``ex_remove_files = False``, silently ignoring the documented
        # parameter and making the Destroy_Task branch unreachable. The
        # parameter is now honored.
        vm = self._get_vm_for_node(node=node)

        server = self.connection.client

        # Based on code from
        # https://pypi.python.org/pypi/pyxenter
        if ex_remove_files:
            request = VI.Destroy_TaskRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            # FIX: '_returnva' typo -> '_returnval' (would have raised
            # AttributeError when this branch ran).
            # pylint: disable=no-member
            ret = server._proxy.Destroy_Task(request)._returnval
            # pylint: enable=no-member
            task = VITask(ret, server)

            # Wait for the task to finish
            status = task.wait_for_state([task.STATE_SUCCESS,
                                          task.STATE_ERROR])

            if status == task.STATE_ERROR:
                raise LibcloudError('Error destroying node: %s' %
                                    (task.get_error_message()))
        else:
            request = VI.UnregisterVMRequestMsg()

            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)
            ret = server._proxy.UnregisterVM(request)
            task = VITask(ret, server)

        return True

    @wrap_non_libcloud_exceptions
    def ex_stop_node(self, node):
        vm = self._get_vm_for_node(node=node)
        vm.power_off()

        return True

    @wrap_non_libcloud_exceptions
    def ex_start_node(self, node):
        vm = self._get_vm_for_node(node=node)
        vm.power_on()

        return True

    @wrap_non_libcloud_exceptions
    def ex_suspend_node(self, node):
        vm = self._get_vm_for_node(node=node)
        vm.suspend()

        return True

    @wrap_non_libcloud_exceptions
    def ex_get_resource_pools(self):
        """
        Return all the available resource pools.

        :rtype: ``dict``
        """
        result = self.connection.client.get_resource_pools()
        return result

    @wrap_non_libcloud_exceptions
    def ex_get_resource_pool_name(self, node):
        """
        Retrieve resource pool name for the provided node.

        :rtype: ``str``
        """
        vm = self._get_vm_for_node(node=node)
        return vm.get_resource_pool_name()

    @wrap_non_libcloud_exceptions
    def ex_get_hosts(self):
        """
        Return all the available hosts.

        :rtype: ``dict``
        """
        result = self.connection.client.get_hosts()
        return result

    @wrap_non_libcloud_exceptions
    def ex_get_datastores(self):
        """
        Return all the available datastores.

        :rtype: ``dict``
        """
        result = self.connection.client.get_datastores()
        return result

    @wrap_non_libcloud_exceptions
    def ex_get_node_by_path(self, path):
        """
        Retrieve Node object for a VM with a provided path.

        :type path: ``str``
        :rtype: :class:`libcloud.compute.base.Node`
        """
        vm = self.connection.client.get_vm_by_path(path)
        node = self._to_node(vm=vm)
        return node

    def ex_get_node_by_uuid(self, uuid):
        """
        Retrieve Node object for a VM with a provided uuid.

        :type uuid: ``str``
        """
        vm = self._get_vm_for_uuid(uuid=uuid)
        node = self._to_node(vm=vm)
        return node

    @wrap_non_libcloud_exceptions
    def ex_get_server_type(self):
        """
        Return VMware installation type.

        :rtype: ``str``
        """
        return self.connection.client.get_server_type()

    @wrap_non_libcloud_exceptions
    def ex_get_api_version(self):
        """
        Return API version of the vmware provider.

        :rtype: ``str``
        """
        return self.connection.client.get_api_version()

    def _get_vm_for_uuid(self, uuid, datacenter=None):
        """
        Retrieve VM for the provided UUID.

        :type uuid: ``str``
        """
        server = self.connection.client

        dc_list = []
        if datacenter and VIMor.is_mor(datacenter):
            dc_list.append(datacenter)
        else:
            dc = server.get_datacenters()
            if datacenter:
                # FIX: dict.iteritems()/iterkeys() are Python-2-only; the
                # portable forms behave identically here.
                dc_list = [k for k, v in dc.items() if v == datacenter]
            else:
                dc_list = list(dc)

        for mor_dc in dc_list:
            request = VI.FindByUuidRequestMsg()
            search_index = server._do_service_content.SearchIndex
            mor_search_index = request.new__this(search_index)
            mor_search_index.set_attribute_type(MORTypes.SearchIndex)
            request.set_element__this(mor_search_index)

            mor_datacenter = request.new_datacenter(mor_dc)
            mor_datacenter.set_attribute_type(MORTypes.Datacenter)
            request.set_element_datacenter(mor_datacenter)

            request.set_element_vmSearch(True)
            request.set_element_uuid(uuid)

            try:
                # pylint: disable=no-member
                vm = server._proxy.FindByUuid(request)._returnval
                # pylint: enable=no-member
            except VI.ZSI.FaultException:
                pass
            else:
                if vm:
                    return VIVirtualMachine(server, vm)

        return None

    def _to_nodes(self, vm_paths):
        nodes = []
        for vm_path in vm_paths:
            vm = self.connection.client.get_vm_by_path(vm_path)
            node = self._to_node(vm=vm)
            nodes.append(node)

        return nodes

    def _to_node(self, vm):
        assert(isinstance(vm, VIVirtualMachine))

        properties = vm.get_properties()
        status = vm.get_status()

        uuid = vm.properties.config.uuid
        instance_uuid = vm.properties.config.instanceUuid

        id = uuid
        name = properties['name']
        public_ips = []
        private_ips = []

        state = self.NODE_STATE_MAP.get(status, NodeState.UNKNOWN)
        ip_address = properties.get('ip_address', None)
        net = properties.get('net', [])

        resource_pool_id = str(vm.properties.resourcePool._obj)

        try:
            # FIX: the original had a trailing comma here, turning the
            # value into a one-element tuple instead of a string.
            operating_system = vm.properties.summary.guest.guestFullName
        except Exception:
            operating_system = 'unknown'

        extra = {
            'uuid': uuid,
            'instance_uuid': instance_uuid,
            'path': properties['path'],
            'resource_pool_id': resource_pool_id,
            'hostname': properties.get('hostname', None),
            'guest_id': properties['guest_id'],
            'devices': properties.get('devices', {}),
            'disks': properties.get('disks', []),
            'net': net,

            'overall_status': vm.properties.overallStatus,
            'operating_system': operating_system,

            'cpus': vm.properties.config.hardware.numCPU,
            'memory_mb': vm.properties.config.hardware.memoryMB
        }

        # Add primary IP
        if ip_address:
            if is_public_subnet(ip_address):
                public_ips.append(ip_address)
            else:
                private_ips.append(ip_address)

        # Add other IP addresses
        for nic in net:
            ip_addresses = nic['ip_addresses']
            for ip_address in ip_addresses:
                try:
                    is_public = is_public_subnet(ip_address)
                except Exception:
                    # TODO: Better support for IPv6
                    is_public = False

                if is_public:
                    public_ips.append(ip_address)
                else:
                    private_ips.append(ip_address)

        # Remove duplicate IPs
        public_ips = list(set(public_ips))
        private_ips = list(set(private_ips))

        node = Node(id=id, name=name, state=state, public_ips=public_ips,
                    private_ips=private_ips, driver=self, extra=extra)
        return node

    def _get_vm_for_node(self, node):
        uuid = node.id
        vm = self._get_vm_for_uuid(uuid=uuid)
        return vm

    def _ex_connection_class_kwargs(self):
        kwargs = {
            'url': self.url
        }

        return kwargs
class VSphere_5_5_NodeDriver(VSphereNodeDriver):
    # vSphere 5.5 currently shares the base implementation; only the
    # human-readable driver name differs.
    name = 'VMware vSphere v5.5'
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
# pychecker suppression directives for this generated module.
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""

# Use the extendable message base class when the ProtocolBuffer runtime
# supports extensions; otherwise fall back to the plain message class.
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
  _extension_runtime = True
  _ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
  _extension_runtime = False
  _ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
class UserServiceError(ProtocolBuffer.ProtocolMessage):
  """Machine-generated ProtocolBuffer message for user-service errors.

  The message carries no fields of its own; it only defines the
  ErrorCode enumeration below. Do not edit by hand.
  """

  # ErrorCode enumeration values.
  OK = 0
  REDIRECT_URL_TOO_LONG = 1
  NOT_ALLOWED = 2
  OAUTH_INVALID_TOKEN = 3
  OAUTH_INVALID_REQUEST = 4
  OAUTH_ERROR = 5

  # Reverse mapping from numeric code to symbolic name.
  _ErrorCode_NAMES = {
    0: "OK",
    1: "REDIRECT_URL_TOO_LONG",
    2: "NOT_ALLOWED",
    3: "OAUTH_INVALID_TOKEN",
    4: "OAUTH_INVALID_REQUEST",
    5: "OAUTH_ERROR",
  }

  # Returns "" for unknown codes rather than raising.
  def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
  ErrorCode_Name = classmethod(ErrorCode_Name)

  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)

  def MergeFrom(self, x):
    assert x is not self

  def Equals(self, x):
    if x is self: return 1
    return 1

  def IsInitialized(self, debug_strs=None):
    # No required fields, so the message is always initialized.
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    return n

  def ByteSizePartial(self):
    n = 0
    return n

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # Skip every tag; this message defines no fields.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
  }, 0)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.UserServiceError'
class CreateLoginURLRequest(ProtocolBuffer.ProtocolMessage):
has_destination_url_ = 0
destination_url_ = ""
has_auth_domain_ = 0
auth_domain_ = ""
has_federated_identity_ = 0
federated_identity_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def destination_url(self): return self.destination_url_
def set_destination_url(self, x):
self.has_destination_url_ = 1
self.destination_url_ = x
def clear_destination_url(self):
if self.has_destination_url_:
self.has_destination_url_ = 0
self.destination_url_ = ""
def has_destination_url(self): return self.has_destination_url_
def auth_domain(self): return self.auth_domain_
def set_auth_domain(self, x):
self.has_auth_domain_ = 1
self.auth_domain_ = x
def clear_auth_domain(self):
if self.has_auth_domain_:
self.has_auth_domain_ = 0
self.auth_domain_ = ""
def has_auth_domain(self): return self.has_auth_domain_
def federated_identity(self): return self.federated_identity_
def set_federated_identity(self, x):
self.has_federated_identity_ = 1
self.federated_identity_ = x
def clear_federated_identity(self):
if self.has_federated_identity_:
self.has_federated_identity_ = 0
self.federated_identity_ = ""
def has_federated_identity(self): return self.has_federated_identity_
def MergeFrom(self, x):
assert x is not self
if (x.has_destination_url()): self.set_destination_url(x.destination_url())
if (x.has_auth_domain()): self.set_auth_domain(x.auth_domain())
if (x.has_federated_identity()): self.set_federated_identity(x.federated_identity())
def Equals(self, x):
if x is self: return 1
if self.has_destination_url_ != x.has_destination_url_: return 0
if self.has_destination_url_ and self.destination_url_ != x.destination_url_: return 0
if self.has_auth_domain_ != x.has_auth_domain_: return 0
if self.has_auth_domain_ and self.auth_domain_ != x.auth_domain_: return 0
if self.has_federated_identity_ != x.has_federated_identity_: return 0
if self.has_federated_identity_ and self.federated_identity_ != x.federated_identity_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_destination_url_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: destination_url not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.destination_url_))
if (self.has_auth_domain_): n += 1 + self.lengthString(len(self.auth_domain_))
if (self.has_federated_identity_): n += 1 + self.lengthString(len(self.federated_identity_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_destination_url_):
n += 1
n += self.lengthString(len(self.destination_url_))
if (self.has_auth_domain_): n += 1 + self.lengthString(len(self.auth_domain_))
if (self.has_federated_identity_): n += 1 + self.lengthString(len(self.federated_identity_))
return n
def Clear(self):
self.clear_destination_url()
self.clear_auth_domain()
self.clear_federated_identity()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.destination_url_)
if (self.has_auth_domain_):
out.putVarInt32(18)
out.putPrefixedString(self.auth_domain_)
if (self.has_federated_identity_):
out.putVarInt32(26)
out.putPrefixedString(self.federated_identity_)
def OutputPartial(self, out):
if (self.has_destination_url_):
out.putVarInt32(10)
out.putPrefixedString(self.destination_url_)
if (self.has_auth_domain_):
out.putVarInt32(18)
out.putPrefixedString(self.auth_domain_)
if (self.has_federated_identity_):
out.putVarInt32(26)
out.putPrefixedString(self.federated_identity_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_destination_url(d.getPrefixedString())
continue
if tt == 18:
self.set_auth_domain(d.getPrefixedString())
continue
if tt == 26:
self.set_federated_identity(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    """Return a human-readable text rendering of the set fields."""
    res=""
    if self.has_destination_url_: res+=prefix+("destination_url: %s\n" % self.DebugFormatString(self.destination_url_))
    if self.has_auth_domain_: res+=prefix+("auth_domain: %s\n" % self.DebugFormatString(self.auth_domain_))
    if self.has_federated_identity_: res+=prefix+("federated_identity: %s\n" % self.DebugFormatString(self.federated_identity_))
    return res
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    """Build a dense tag->value tuple from a sparse dict (class-build helper)."""
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kdestination_url = 1
kauth_domain = 2
kfederated_identity = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "destination_url",
2: "auth_domain",
3: "federated_identity",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.CreateLoginURLRequest'
class CreateLoginURLResponse(ProtocolBuffer.ProtocolMessage):
  """Generated protocol message: apphosting.CreateLoginURLResponse.

  Single required string field: login_url (tag 1).
  """
  has_login_url_ = 0
  login_url_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def login_url(self): return self.login_url_

  def set_login_url(self, x):
    self.has_login_url_ = 1
    self.login_url_ = x

  def clear_login_url(self):
    if self.has_login_url_:
      self.has_login_url_ = 0
      self.login_url_ = ""

  def has_login_url(self): return self.has_login_url_

  def MergeFrom(self, x):
    """Merge set fields of message `x` into this message."""
    assert x is not self
    if (x.has_login_url()): self.set_login_url(x.login_url())

  def Equals(self, x):
    if x is self: return 1
    if self.has_login_url_ != x.has_login_url_: return 0
    if self.has_login_url_ and self.login_url_ != x.login_url_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff the required login_url field is set."""
    initialized = 1
    if (not self.has_login_url_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: login_url not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.login_url_))
    return n + 1

  def ByteSizePartial(self):
    n = 0
    if (self.has_login_url_):
      n += 1
      n += self.lengthString(len(self.login_url_))
    return n

  def Clear(self):
    self.clear_login_url()

  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.login_url_)

  def OutputPartial(self, out):
    if (self.has_login_url_):
      out.putVarInt32(10)
      out.putPrefixedString(self.login_url_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_login_url(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_login_url_: res+=prefix+("login_url: %s\n" % self.DebugFormatString(self.login_url_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  klogin_url = 1

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "login_url",
  }, 1)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.CreateLoginURLResponse'
class CreateLogoutURLRequest(ProtocolBuffer.ProtocolMessage):
  """Generated protocol message: apphosting.CreateLogoutURLRequest.

  Fields: destination_url (required string, tag 1),
  auth_domain (optional string, tag 2).
  """
  has_destination_url_ = 0
  destination_url_ = ""
  has_auth_domain_ = 0
  auth_domain_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def destination_url(self): return self.destination_url_

  def set_destination_url(self, x):
    self.has_destination_url_ = 1
    self.destination_url_ = x

  def clear_destination_url(self):
    if self.has_destination_url_:
      self.has_destination_url_ = 0
      self.destination_url_ = ""

  def has_destination_url(self): return self.has_destination_url_

  def auth_domain(self): return self.auth_domain_

  def set_auth_domain(self, x):
    self.has_auth_domain_ = 1
    self.auth_domain_ = x

  def clear_auth_domain(self):
    if self.has_auth_domain_:
      self.has_auth_domain_ = 0
      self.auth_domain_ = ""

  def has_auth_domain(self): return self.has_auth_domain_

  def MergeFrom(self, x):
    """Merge set fields of message `x` into this message."""
    assert x is not self
    if (x.has_destination_url()): self.set_destination_url(x.destination_url())
    if (x.has_auth_domain()): self.set_auth_domain(x.auth_domain())

  def Equals(self, x):
    if x is self: return 1
    if self.has_destination_url_ != x.has_destination_url_: return 0
    if self.has_destination_url_ and self.destination_url_ != x.destination_url_: return 0
    if self.has_auth_domain_ != x.has_auth_domain_: return 0
    if self.has_auth_domain_ and self.auth_domain_ != x.auth_domain_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff the required destination_url field is set."""
    initialized = 1
    if (not self.has_destination_url_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: destination_url not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.destination_url_))
    if (self.has_auth_domain_): n += 1 + self.lengthString(len(self.auth_domain_))
    return n + 1

  def ByteSizePartial(self):
    n = 0
    if (self.has_destination_url_):
      n += 1
      n += self.lengthString(len(self.destination_url_))
    if (self.has_auth_domain_): n += 1 + self.lengthString(len(self.auth_domain_))
    return n

  def Clear(self):
    self.clear_destination_url()
    self.clear_auth_domain()

  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.destination_url_)
    if (self.has_auth_domain_):
      out.putVarInt32(18)
      out.putPrefixedString(self.auth_domain_)

  def OutputPartial(self, out):
    if (self.has_destination_url_):
      out.putVarInt32(10)
      out.putPrefixedString(self.destination_url_)
    if (self.has_auth_domain_):
      out.putVarInt32(18)
      out.putPrefixedString(self.auth_domain_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_destination_url(d.getPrefixedString())
        continue
      if tt == 18:
        self.set_auth_domain(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_destination_url_: res+=prefix+("destination_url: %s\n" % self.DebugFormatString(self.destination_url_))
    if self.has_auth_domain_: res+=prefix+("auth_domain: %s\n" % self.DebugFormatString(self.auth_domain_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kdestination_url = 1
  kauth_domain = 2

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "destination_url",
    2: "auth_domain",
  }, 2)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.CreateLogoutURLRequest'
class CreateLogoutURLResponse(ProtocolBuffer.ProtocolMessage):
  """Generated protocol message: apphosting.CreateLogoutURLResponse.

  Single required string field: logout_url (tag 1).
  """
  has_logout_url_ = 0
  logout_url_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def logout_url(self): return self.logout_url_

  def set_logout_url(self, x):
    self.has_logout_url_ = 1
    self.logout_url_ = x

  def clear_logout_url(self):
    if self.has_logout_url_:
      self.has_logout_url_ = 0
      self.logout_url_ = ""

  def has_logout_url(self): return self.has_logout_url_

  def MergeFrom(self, x):
    """Merge set fields of message `x` into this message."""
    assert x is not self
    if (x.has_logout_url()): self.set_logout_url(x.logout_url())

  def Equals(self, x):
    if x is self: return 1
    if self.has_logout_url_ != x.has_logout_url_: return 0
    if self.has_logout_url_ and self.logout_url_ != x.logout_url_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff the required logout_url field is set."""
    initialized = 1
    if (not self.has_logout_url_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: logout_url not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.logout_url_))
    return n + 1

  def ByteSizePartial(self):
    n = 0
    if (self.has_logout_url_):
      n += 1
      n += self.lengthString(len(self.logout_url_))
    return n

  def Clear(self):
    self.clear_logout_url()

  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.logout_url_)

  def OutputPartial(self, out):
    if (self.has_logout_url_):
      out.putVarInt32(10)
      out.putPrefixedString(self.logout_url_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_logout_url(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_logout_url_: res+=prefix+("logout_url: %s\n" % self.DebugFormatString(self.logout_url_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  klogout_url = 1

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "logout_url",
  }, 1)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.CreateLogoutURLResponse'
class GetOAuthUserRequest(ProtocolBuffer.ProtocolMessage):
  """Generated protocol message: apphosting.GetOAuthUserRequest.

  Fields: scope (optional string, tag 1), scopes (repeated string, tag 2).
  """
  has_scope_ = 0
  scope_ = ""

  def __init__(self, contents=None):
    # Repeated field storage must be per-instance, hence created here.
    self.scopes_ = []
    if contents is not None: self.MergeFromString(contents)

  def scope(self): return self.scope_

  def set_scope(self, x):
    self.has_scope_ = 1
    self.scope_ = x

  def clear_scope(self):
    if self.has_scope_:
      self.has_scope_ = 0
      self.scope_ = ""

  def has_scope(self): return self.has_scope_

  def scopes_size(self): return len(self.scopes_)

  def scopes_list(self): return self.scopes_

  def scopes(self, i):
    return self.scopes_[i]

  def set_scopes(self, i, x):
    self.scopes_[i] = x

  def add_scopes(self, x):
    self.scopes_.append(x)

  def clear_scopes(self):
    self.scopes_ = []

  def MergeFrom(self, x):
    """Merge set fields of message `x` into this message."""
    assert x is not self
    if (x.has_scope()): self.set_scope(x.scope())
    for i in xrange(x.scopes_size()): self.add_scopes(x.scopes(i))

  def Equals(self, x):
    if x is self: return 1
    if self.has_scope_ != x.has_scope_: return 0
    if self.has_scope_ and self.scope_ != x.scope_: return 0
    if len(self.scopes_) != len(x.scopes_): return 0
    for e1, e2 in zip(self.scopes_, x.scopes_):
      if e1 != e2: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Always initialized: this message has no required fields."""
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    if (self.has_scope_): n += 1 + self.lengthString(len(self.scope_))
    n += 1 * len(self.scopes_)
    for i in xrange(len(self.scopes_)): n += self.lengthString(len(self.scopes_[i]))
    return n

  def ByteSizePartial(self):
    n = 0
    if (self.has_scope_): n += 1 + self.lengthString(len(self.scope_))
    n += 1 * len(self.scopes_)
    for i in xrange(len(self.scopes_)): n += self.lengthString(len(self.scopes_[i]))
    return n

  def Clear(self):
    self.clear_scope()
    self.clear_scopes()

  def OutputUnchecked(self, out):
    if (self.has_scope_):
      out.putVarInt32(10)
      out.putPrefixedString(self.scope_)
    for i in xrange(len(self.scopes_)):
      out.putVarInt32(18)
      out.putPrefixedString(self.scopes_[i])

  def OutputPartial(self, out):
    if (self.has_scope_):
      out.putVarInt32(10)
      out.putPrefixedString(self.scope_)
    for i in xrange(len(self.scopes_)):
      out.putVarInt32(18)
      out.putPrefixedString(self.scopes_[i])

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_scope(d.getPrefixedString())
        continue
      if tt == 18:
        self.add_scopes(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_scope_: res+=prefix+("scope: %s\n" % self.DebugFormatString(self.scope_))
    cnt=0
    for e in self.scopes_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("scopes%s: %s\n" % (elm, self.DebugFormatString(e)))
      cnt+=1
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kscope = 1
  kscopes = 2

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "scope",
    2: "scopes",
  }, 2)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.GetOAuthUserRequest'
class GetOAuthUserResponse(ProtocolBuffer.ProtocolMessage):
  """Generated protocol message: apphosting.GetOAuthUserResponse.

  Fields: email (required string, 1), user_id (required string, 2),
  auth_domain (required string, 3), user_organization (optional string, 4),
  is_admin (optional bool, 5), client_id (optional string, 6),
  scopes (repeated string, 7).
  """
  has_email_ = 0
  email_ = ""
  has_user_id_ = 0
  user_id_ = ""
  has_auth_domain_ = 0
  auth_domain_ = ""
  has_user_organization_ = 0
  user_organization_ = ""
  has_is_admin_ = 0
  is_admin_ = 0
  has_client_id_ = 0
  client_id_ = ""

  def __init__(self, contents=None):
    # Repeated field storage must be per-instance, hence created here.
    self.scopes_ = []
    if contents is not None: self.MergeFromString(contents)

  def email(self): return self.email_

  def set_email(self, x):
    self.has_email_ = 1
    self.email_ = x

  def clear_email(self):
    if self.has_email_:
      self.has_email_ = 0
      self.email_ = ""

  def has_email(self): return self.has_email_

  def user_id(self): return self.user_id_

  def set_user_id(self, x):
    self.has_user_id_ = 1
    self.user_id_ = x

  def clear_user_id(self):
    if self.has_user_id_:
      self.has_user_id_ = 0
      self.user_id_ = ""

  def has_user_id(self): return self.has_user_id_

  def auth_domain(self): return self.auth_domain_

  def set_auth_domain(self, x):
    self.has_auth_domain_ = 1
    self.auth_domain_ = x

  def clear_auth_domain(self):
    if self.has_auth_domain_:
      self.has_auth_domain_ = 0
      self.auth_domain_ = ""

  def has_auth_domain(self): return self.has_auth_domain_

  def user_organization(self): return self.user_organization_

  def set_user_organization(self, x):
    self.has_user_organization_ = 1
    self.user_organization_ = x

  def clear_user_organization(self):
    if self.has_user_organization_:
      self.has_user_organization_ = 0
      self.user_organization_ = ""

  def has_user_organization(self): return self.has_user_organization_

  def is_admin(self): return self.is_admin_

  def set_is_admin(self, x):
    self.has_is_admin_ = 1
    self.is_admin_ = x

  def clear_is_admin(self):
    if self.has_is_admin_:
      self.has_is_admin_ = 0
      self.is_admin_ = 0

  def has_is_admin(self): return self.has_is_admin_

  def client_id(self): return self.client_id_

  def set_client_id(self, x):
    self.has_client_id_ = 1
    self.client_id_ = x

  def clear_client_id(self):
    if self.has_client_id_:
      self.has_client_id_ = 0
      self.client_id_ = ""

  def has_client_id(self): return self.has_client_id_

  def scopes_size(self): return len(self.scopes_)

  def scopes_list(self): return self.scopes_

  def scopes(self, i):
    return self.scopes_[i]

  def set_scopes(self, i, x):
    self.scopes_[i] = x

  def add_scopes(self, x):
    self.scopes_.append(x)

  def clear_scopes(self):
    self.scopes_ = []

  def MergeFrom(self, x):
    """Merge set fields of message `x` into this message."""
    assert x is not self
    if (x.has_email()): self.set_email(x.email())
    if (x.has_user_id()): self.set_user_id(x.user_id())
    if (x.has_auth_domain()): self.set_auth_domain(x.auth_domain())
    if (x.has_user_organization()): self.set_user_organization(x.user_organization())
    if (x.has_is_admin()): self.set_is_admin(x.is_admin())
    if (x.has_client_id()): self.set_client_id(x.client_id())
    for i in xrange(x.scopes_size()): self.add_scopes(x.scopes(i))

  def Equals(self, x):
    if x is self: return 1
    if self.has_email_ != x.has_email_: return 0
    if self.has_email_ and self.email_ != x.email_: return 0
    if self.has_user_id_ != x.has_user_id_: return 0
    if self.has_user_id_ and self.user_id_ != x.user_id_: return 0
    if self.has_auth_domain_ != x.has_auth_domain_: return 0
    if self.has_auth_domain_ and self.auth_domain_ != x.auth_domain_: return 0
    if self.has_user_organization_ != x.has_user_organization_: return 0
    if self.has_user_organization_ and self.user_organization_ != x.user_organization_: return 0
    if self.has_is_admin_ != x.has_is_admin_: return 0
    if self.has_is_admin_ and self.is_admin_ != x.is_admin_: return 0
    if self.has_client_id_ != x.has_client_id_: return 0
    if self.has_client_id_ and self.client_id_ != x.client_id_: return 0
    if len(self.scopes_) != len(x.scopes_): return 0
    for e1, e2 in zip(self.scopes_, x.scopes_):
      if e1 != e2: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff all required fields (email, user_id, auth_domain) are set."""
    initialized = 1
    if (not self.has_email_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: email not set.')
    if (not self.has_user_id_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: user_id not set.')
    if (not self.has_auth_domain_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: auth_domain not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.email_))
    n += self.lengthString(len(self.user_id_))
    n += self.lengthString(len(self.auth_domain_))
    if (self.has_user_organization_): n += 1 + self.lengthString(len(self.user_organization_))
    if (self.has_is_admin_): n += 2
    if (self.has_client_id_): n += 1 + self.lengthString(len(self.client_id_))
    n += 1 * len(self.scopes_)
    for i in xrange(len(self.scopes_)): n += self.lengthString(len(self.scopes_[i]))
    # +3 covers the tag bytes of the three required fields.
    return n + 3

  def ByteSizePartial(self):
    n = 0
    if (self.has_email_):
      n += 1
      n += self.lengthString(len(self.email_))
    if (self.has_user_id_):
      n += 1
      n += self.lengthString(len(self.user_id_))
    if (self.has_auth_domain_):
      n += 1
      n += self.lengthString(len(self.auth_domain_))
    if (self.has_user_organization_): n += 1 + self.lengthString(len(self.user_organization_))
    if (self.has_is_admin_): n += 2
    if (self.has_client_id_): n += 1 + self.lengthString(len(self.client_id_))
    n += 1 * len(self.scopes_)
    for i in xrange(len(self.scopes_)): n += self.lengthString(len(self.scopes_[i]))
    return n

  def Clear(self):
    self.clear_email()
    self.clear_user_id()
    self.clear_auth_domain()
    self.clear_user_organization()
    self.clear_is_admin()
    self.clear_client_id()
    self.clear_scopes()

  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.email_)
    out.putVarInt32(18)
    out.putPrefixedString(self.user_id_)
    out.putVarInt32(26)
    out.putPrefixedString(self.auth_domain_)
    if (self.has_user_organization_):
      out.putVarInt32(34)
      out.putPrefixedString(self.user_organization_)
    if (self.has_is_admin_):
      out.putVarInt32(40)
      out.putBoolean(self.is_admin_)
    if (self.has_client_id_):
      out.putVarInt32(50)
      out.putPrefixedString(self.client_id_)
    for i in xrange(len(self.scopes_)):
      out.putVarInt32(58)
      out.putPrefixedString(self.scopes_[i])

  def OutputPartial(self, out):
    if (self.has_email_):
      out.putVarInt32(10)
      out.putPrefixedString(self.email_)
    if (self.has_user_id_):
      out.putVarInt32(18)
      out.putPrefixedString(self.user_id_)
    if (self.has_auth_domain_):
      out.putVarInt32(26)
      out.putPrefixedString(self.auth_domain_)
    if (self.has_user_organization_):
      out.putVarInt32(34)
      out.putPrefixedString(self.user_organization_)
    if (self.has_is_admin_):
      out.putVarInt32(40)
      out.putBoolean(self.is_admin_)
    if (self.has_client_id_):
      out.putVarInt32(50)
      out.putPrefixedString(self.client_id_)
    for i in xrange(len(self.scopes_)):
      out.putVarInt32(58)
      out.putPrefixedString(self.scopes_[i])

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_email(d.getPrefixedString())
        continue
      if tt == 18:
        self.set_user_id(d.getPrefixedString())
        continue
      if tt == 26:
        self.set_auth_domain(d.getPrefixedString())
        continue
      if tt == 34:
        self.set_user_organization(d.getPrefixedString())
        continue
      if tt == 40:
        self.set_is_admin(d.getBoolean())
        continue
      if tt == 50:
        self.set_client_id(d.getPrefixedString())
        continue
      if tt == 58:
        self.add_scopes(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_email_: res+=prefix+("email: %s\n" % self.DebugFormatString(self.email_))
    if self.has_user_id_: res+=prefix+("user_id: %s\n" % self.DebugFormatString(self.user_id_))
    if self.has_auth_domain_: res+=prefix+("auth_domain: %s\n" % self.DebugFormatString(self.auth_domain_))
    if self.has_user_organization_: res+=prefix+("user_organization: %s\n" % self.DebugFormatString(self.user_organization_))
    if self.has_is_admin_: res+=prefix+("is_admin: %s\n" % self.DebugFormatBool(self.is_admin_))
    if self.has_client_id_: res+=prefix+("client_id: %s\n" % self.DebugFormatString(self.client_id_))
    cnt=0
    for e in self.scopes_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("scopes%s: %s\n" % (elm, self.DebugFormatString(e)))
      cnt+=1
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kemail = 1
  kuser_id = 2
  kauth_domain = 3
  kuser_organization = 4
  kis_admin = 5
  kclient_id = 6
  kscopes = 7

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "email",
    2: "user_id",
    3: "auth_domain",
    4: "user_organization",
    5: "is_admin",
    6: "client_id",
    7: "scopes",
  }, 7)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STRING,
    5: ProtocolBuffer.Encoder.NUMERIC,
    6: ProtocolBuffer.Encoder.STRING,
    7: ProtocolBuffer.Encoder.STRING,
  }, 7, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.GetOAuthUserResponse'
class CheckOAuthSignatureRequest(ProtocolBuffer.ProtocolMessage):
  """Generated protocol message: apphosting.CheckOAuthSignatureRequest.

  Empty message: it carries no fields.
  """

  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)

  def MergeFrom(self, x):
    assert x is not self

  def Equals(self, x):
    if x is self: return 1
    return 1

  def IsInitialized(self, debug_strs=None):
    """Always initialized: this message has no required fields."""
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    return n

  def ByteSizePartial(self):
    n = 0
    return n

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
  }, 0)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.CheckOAuthSignatureRequest'
class CheckOAuthSignatureResponse(ProtocolBuffer.ProtocolMessage):
  """Generated protocol message: apphosting.CheckOAuthSignatureResponse.

  Single required string field: oauth_consumer_key (tag 1).
  """
  has_oauth_consumer_key_ = 0
  oauth_consumer_key_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def oauth_consumer_key(self): return self.oauth_consumer_key_

  def set_oauth_consumer_key(self, x):
    self.has_oauth_consumer_key_ = 1
    self.oauth_consumer_key_ = x

  def clear_oauth_consumer_key(self):
    if self.has_oauth_consumer_key_:
      self.has_oauth_consumer_key_ = 0
      self.oauth_consumer_key_ = ""

  def has_oauth_consumer_key(self): return self.has_oauth_consumer_key_

  def MergeFrom(self, x):
    """Merge set fields of message `x` into this message."""
    assert x is not self
    if (x.has_oauth_consumer_key()): self.set_oauth_consumer_key(x.oauth_consumer_key())

  def Equals(self, x):
    if x is self: return 1
    if self.has_oauth_consumer_key_ != x.has_oauth_consumer_key_: return 0
    if self.has_oauth_consumer_key_ and self.oauth_consumer_key_ != x.oauth_consumer_key_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff the required oauth_consumer_key field is set."""
    initialized = 1
    if (not self.has_oauth_consumer_key_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: oauth_consumer_key not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.oauth_consumer_key_))
    return n + 1

  def ByteSizePartial(self):
    n = 0
    if (self.has_oauth_consumer_key_):
      n += 1
      n += self.lengthString(len(self.oauth_consumer_key_))
    return n

  def Clear(self):
    self.clear_oauth_consumer_key()

  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.oauth_consumer_key_)

  def OutputPartial(self, out):
    if (self.has_oauth_consumer_key_):
      out.putVarInt32(10)
      out.putPrefixedString(self.oauth_consumer_key_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_oauth_consumer_key(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_oauth_consumer_key_: res+=prefix+("oauth_consumer_key: %s\n" % self.DebugFormatString(self.oauth_consumer_key_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  koauth_consumer_key = 1

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "oauth_consumer_key",
  }, 1)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.CheckOAuthSignatureResponse'
# Hook point for runtime-specific extensions; nothing is added here.
if _extension_runtime:
  pass

# Public API of this generated module.
__all__ = ['UserServiceError','CreateLoginURLRequest','CreateLoginURLResponse','CreateLogoutURLRequest','CreateLogoutURLResponse','GetOAuthUserRequest','GetOAuthUserResponse','CheckOAuthSignatureRequest','CheckOAuthSignatureResponse']
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import functools
from prjxray.tile_segbits import PsuedoPipType
from .database.connection_db_utils import get_node_pkey, get_wires_in_node, get_wire
ZERO_NET = -1
ONE_NET = -2
DEBUG = False
# The following constants are used to wrap lists so that 1-length
# lists do not get trimmed away producing errors during parsing
#
# Error example:
# - Input: [list a b c [list d e] [list f] [list g h]]
# - Output: a b c {d e} f {g h}
#
# In this example, the `f` list gets evaluated into a string, which is not
# the expected result
#
# Correct example:
# - Input: [list a b c " [list d e] " " [list f] " " [list g h] " ]
# - Output: a b c {d e} {f} {g h}
#
# The correct example has the `f` list correctly evaluated into an
# actual list
TCL_LIST_OPEN = '"[list '
TCL_LIST_CLOSE = '] "'
def create_check_downstream_default(conn, db):
    """Build a memoized check_for_default lookup bound to `conn` and `db`.

    The returned callable maps a wire_in_tile pkey to the wire reached
    through an "always" pseudo-pip, or (None, None) when no such pip exists.
    """
    cursor = conn.cursor()

    @functools.lru_cache(maxsize=None)
    def check_for_default(wire_in_tile_pkey):
        """ Returns downstream wire_in_tile_pkey from given wire_in_tile_pkey.

        This function traverses "always" ppips downstream.

        Returns None if no ppips are found for the given wire_in_tile_pkey.

        """
        cursor.execute(
            "SELECT name, phy_tile_type_pkey FROM wire_in_tile WHERE pkey = ?",
            (wire_in_tile_pkey, ))
        name, phy_tile_type_pkey = cursor.fetchone()

        cursor.execute("SELECT name FROM tile_type WHERE pkey = ?",
                       (phy_tile_type_pkey, ))
        tile_type = cursor.fetchone()[0]

        # NOTE: PsuedoPipType is the (misspelled) name used by prjxray itself.
        segbits = db.get_tile_segbits(tile_type)

        for pip_name, pip_type in segbits.ppips.items():
            parts = pip_name.split('.')
            assert len(parts) == 3
            assert parts[0] == tile_type

            # Only follow pips whose source is this wire and that are
            # unconditionally active.
            if parts[2] != name or pip_type != PsuedoPipType.ALWAYS:
                continue

            downstream_wire = parts[1]
            cursor.execute(
                "SELECT pkey FROM wire_in_tile WHERE name = ? AND phy_tile_type_pkey = ?;",
                (downstream_wire, phy_tile_type_pkey))
            return cursor.fetchone()[0], pip_name

        return None, None

    return check_for_default
def find_downstream_node(conn, check_downstream_default, source_node_pkey):
    """Locate the node reached from source_node_pkey via an "always" PPIP.

    Only trivial "always" pseudo-pips are traversed, never active pips.
    Returns (downstream_node_pkey, pip_name) for the first wire of the node
    that drives such a pip, or (None, None) when none does.
    """
    cursor = conn.cursor()

    for wire_pkey in get_wires_in_node(conn, source_node_pkey):
        cursor.execute(
            "SELECT phy_tile_pkey, wire_in_tile_pkey FROM wire WHERE pkey = ?",
            (wire_pkey, ))
        phy_tile_pkey, wire_in_tile_pkey = cursor.fetchone()

        downstream_wire_in_tile_pkey, pip = check_downstream_default(
            wire_in_tile_pkey)
        if downstream_wire_in_tile_pkey is None:
            continue

        # replace_tile presumably rewrites the tile-type pip name onto the
        # concrete tile instance — defined elsewhere in this module.
        pip = replace_tile(cursor, pip, phy_tile_pkey)

        cursor.execute(
            "SELECT node_pkey FROM wire WHERE phy_tile_pkey = ? AND wire_in_tile_pkey = ?",
            (
                phy_tile_pkey,
                downstream_wire_in_tile_pkey,
            ))
        return cursor.fetchone()[0], pip

    return None, None
def output_builder(fixed_route, first_run=False):
    """Yield TCL tokens for a (possibly nested) fixed-route list.

    Every list level is wrapped in TCL_LIST_OPEN / TCL_LIST_CLOSE so that
    single-element sublists survive TCL evaluation (see module comment).

    first_run is accepted for interface compatibility but is unused.
    """
    yield TCL_LIST_OPEN

    for element in fixed_route:
        if type(element) is list:
            # Recurse into nested branches, flattening their token streams.
            for token in output_builder(element):
                yield token
        else:
            yield element

    yield TCL_LIST_CLOSE
class Net(object):
""" Object to present a net (e.g. a source and it sinks). """
def __init__(self, source_wire_pkey):
""" Create a net.
source_wire_pkey (int): A pkey from the wire table that is the source
of this net. This wire must be the wire connected to a site pin.
"""
self.source_wire_pkey = source_wire_pkey
self.route_wire_pkeys = set()
self.parent_nodes = {}
self.incoming_wire_map = {}
self.pips = {}
def add_node(self,
conn,
net_map,
node_pkey,
parent_node_pkey,
incoming_wire_pkey=None,
pip=None):
""" Add a node to a net.
node_pkey (int): A pkey from the node table that is part of this net.
parent_node_pkey (int): A pkey from the node table that is the source
for node_pkey.
incoming_wire_pkey (int): incoming_wire_pkey is the wire_pkey that is
the connecting wire in this node. For example, if this node is
connected to the net via a pip, then incoming_wire_pkey should be
the source wire_pkey from the pip. This is important when dealing
with bidirection pips.
pip (str): Name of pip that connects this node to it's parent node.
Pip format is "{tile_name}.{wire1}.{wire0}".
"""
if DEBUG:
print('// sink node {} connected to source {}'.format(
node_pkey, self.source_wire_pkey))
if incoming_wire_pkey is not None:
assert node_pkey not in self.incoming_wire_map, node_pkey
self.incoming_wire_map[node_pkey] = incoming_wire_pkey
if pip is not None:
self.pips[node_pkey] = pip
self.parent_nodes[node_pkey] = parent_node_pkey
for wire_pkey in get_wires_in_node(conn, node_pkey):
if wire_pkey not in net_map:
net_map[wire_pkey] = set()
net_map[wire_pkey].add(self.source_wire_pkey)
self.route_wire_pkeys.add(wire_pkey)
def expand_source(self, conn, check_downstream_default, net_map):
""" Propigate net downstream through trival PPIP connections. """
source_node_pkey = get_node_pkey(conn, self.source_wire_pkey)
while True:
parent_node_pkey = source_node_pkey
source_node_pkey, pip = find_downstream_node(
conn, check_downstream_default, source_node_pkey)
if source_node_pkey is not None:
self.add_node(
conn, net_map, source_node_pkey, parent_node_pkey, pip=pip)
else:
break
def prune_antennas(self, sink_node_pkeys):
""" Remove entries from parent_nodes that belong to antenna wires.
The expand_source may add entires in parent_nodes that are
disconnected. hese nodes should be removed prior to outputting fixed
routes.
"""
alive_nodes = set()
for node in self.parent_nodes.keys():
if node in sink_node_pkeys:
while node in self.parent_nodes:
alive_nodes.add(node)
node = self.parent_nodes[node]
dead_nodes = set(self.parent_nodes.keys()) - alive_nodes
for dead_node in dead_nodes:
del self.parent_nodes[dead_node]
if dead_node in self.pips:
del self.pips[dead_node]
def is_net_alive(self):
""" True if this net is connected to sinks.
Call this method after invoked prune_antennas to avoid false positives.
"""
return len(self.parent_nodes) > 0
def make_fixed_route(self, conn, wire_pkey_to_wire):
    """ Yields a TCL statement that is the value for the FIXED_ROUTE param.

    Should invoke this method after calling prune_antennas.  The route is
    emitted as a nested list: sibling branches at a fan-out point become
    nested sub-lists, serialized through output_builder.
    """
    # Invert parent_nodes into a source -> [sinks] adjacency map.
    source_to_sink_node_map = {}
    for sink, src in self.parent_nodes.items():
        if src not in source_to_sink_node_map:
            source_to_sink_node_map[src] = []
        source_to_sink_node_map[src].append(sink)
    c = conn.cursor()

    def get_a_wire(node_pkey):
        # Return "<tile>/<wire>" for an arbitrary wire of the node.
        c.execute(
            "SELECT phy_tile_pkey, wire_in_tile_pkey FROM wire WHERE node_pkey = ? LIMIT 1",
            (node_pkey, ))
        (
            phy_tile_pkey,
            wire_in_tile_pkey,
        ) = c.fetchone()
        c.execute("SELECT name FROM phy_tile WHERE pkey = ?",
                  (phy_tile_pkey, ))
        tile_name = c.fetchone()[0]
        c.execute("SELECT name FROM wire_in_tile WHERE pkey = ?",
                  (wire_in_tile_pkey, ))
        wire_name = c.fetchone()[0]
        return tile_name + '/' + wire_name

    def descend_fixed_route(source_node_pkey, fixed_route):
        # Depth-first walk appending node selectors; branches become
        # nested lists inserted before the continuing trunk.
        if source_node_pkey in self.incoming_wire_map:
            # Use the exact wire the net entered this node on.
            c.execute(
                "SELECT wire_in_tile_pkey, phy_tile_pkey FROM wire WHERE pkey = ?",
                (self.incoming_wire_map[source_node_pkey], ))
            wire_in_tile_pkey, phy_tile_pkey = c.fetchone()
            c.execute("SELECT name FROM phy_tile WHERE pkey = ?",
                      (phy_tile_pkey, ))
            (tile_name, ) = c.fetchone()
            c.execute("SELECT name FROM wire_in_tile WHERE pkey = ?",
                      (wire_in_tile_pkey, ))
            (wire_name, ) = c.fetchone()
            wire_name = tile_name + '/' + wire_name
        else:
            # We don't have a specific upstream wire, use any from the node
            wire_name = get_a_wire(source_node_pkey)
        wire_name = '[get_nodes -of_object [get_wires {}]]'.format(
            wire_name)
        fixed_route.append(wire_name)
        if source_node_pkey not in source_to_sink_node_map:
            return
        # One sub-list per extra branch; the last branch continues in the
        # current list.
        descend_routes = []
        for _ in range(len(source_to_sink_node_map[source_node_pkey]) - 1):
            fixed_route.append([])
            descend_routes.append(fixed_route[-1])
        descend_routes.append(fixed_route)
        for idx, next_node_pkey in enumerate(
                source_to_sink_node_map[source_node_pkey]):
            descend_fixed_route(next_node_pkey, descend_routes[idx])

    if self.source_wire_pkey not in [ZERO_NET, ONE_NET]:
        # Ordinary net: single tree rooted at the source wire's node.
        fixed_route = []
        descend_fixed_route(
            get_node_pkey(conn, self.source_wire_pkey), fixed_route)
        for i in output_builder(fixed_route):
            yield i
    else:
        # Constant net: may have several independent roots, one per node
        # parented directly to the pseudo source; emit each in ()'s.
        source_nodes = []
        for node, parent_node in self.parent_nodes.items():
            if parent_node == self.source_wire_pkey:
                source_nodes.append(node)
        yield TCL_LIST_OPEN
        for source_node in source_nodes:
            yield '('
            fixed_route = []
            descend_fixed_route(source_node, fixed_route)
            for i in output_builder(fixed_route):
                yield i
            yield ')'
        yield TCL_LIST_CLOSE
def output_pips(self, out):
    """ Append list of pips from this net to list in out.

    out (list) - List to append to; receives (tile, src, dest) tuples.
    """
    # TODO: Output pips into route tree, rather than flat list.
    for pip in self.pips.values():
        tile, dest, src = pip.split('.')
        out.append((tile, src, dest))
def create_check_for_default(db, conn):
    """ Returns check_for_default function. """
    c = conn.cursor()

    # Memoized: pip/segbits lookups are expensive and wire_in_tile_pkey
    # values repeat heavily across the grid.
    @functools.lru_cache(maxsize=None)
    def check_for_default(wire_in_tile_pkey):
        """ Returns upstream wire_in_tile_pkey from given wire_in_tile_pkey.

        This function traverses "always" or "default" ppips upstream. Because
        this function will traverse "default" ppips, it should only be invoked
        on wire_in_tile_pkey that have no active upstream pips, otherwise an
        invalid connection could be made.

        Returns None if no ppips are found for the given wire_in_tile_pkey.
        """
        c.execute(
            "SELECT name, phy_tile_type_pkey FROM wire_in_tile WHERE pkey = ?",
            (wire_in_tile_pkey, ))
        name, phy_tile_type_pkey = c.fetchone()
        c.execute("SELECT name FROM tile_type WHERE pkey = ?",
                  (phy_tile_type_pkey, ))
        tile_type = c.fetchone()[0]
        tile = db.get_tile_segbits(tile_type)
        # The xMUX wires have multiple "hint" connections. Deal with them
        # specially.
        if name in [
                'CLBLM_L_AMUX',
                'CLBLM_L_BMUX',
                'CLBLM_L_CMUX',
                'CLBLM_L_DMUX',
                'CLBLM_M_AMUX',
                'CLBLM_M_BMUX',
                'CLBLM_M_CMUX',
                'CLBLM_M_DMUX',
                'CLBLL_L_AMUX',
                'CLBLL_L_BMUX',
                'CLBLL_L_CMUX',
                'CLBLL_L_DMUX',
                'CLBLL_LL_AMUX',
                'CLBLL_LL_BMUX',
                'CLBLL_LL_CMUX',
                'CLBLL_LL_DMUX',
        ]:
            # e.g. CLBLM_L_AMUX -> CLBLM_L_A; no pip string for these.
            upstream_wire = name.replace('MUX', '')
            c.execute(
                "SELECT pkey FROM wire_in_tile WHERE name = ? AND phy_tile_type_pkey = ?;",
                (upstream_wire, phy_tile_type_pkey))
            upstream_wire_in_tile_pkey = c.fetchone()[0]
            return upstream_wire_in_tile_pkey, None
        # Scan the tile's pseudo-pips for one whose sink is this wire.
        for k in tile.ppips:
            parts = k.split('.')
            assert len(parts) == 3
            if k.startswith('{}.{}.'.format(tile_type, name)):
                assert tile.ppips[k] in [
                    PsuedoPipType.ALWAYS, PsuedoPipType.DEFAULT
                ], (k, tile.ppips[k])
                upstream_wire = parts[2]
                c.execute(
                    "SELECT pkey FROM wire_in_tile WHERE name = ? AND phy_tile_type_pkey = ?;",
                    (upstream_wire, phy_tile_type_pkey))
                upstream_wire_in_tile_pkey = c.fetchone()[0]
                return upstream_wire_in_tile_pkey, k
        return None, None

    return check_for_default
def replace_tile(c, pip, phy_tile_pkey):
    """ Replace tile type with tile name for given pip.

    c (sqlite3.Cursor) - Cursor pointing to connection database.
    pip (str) - Pip string in form of "{tile_type}.{wire1}.{wire0}"
    phy_tile_pkey (int) - phy_tile primary key containing this pip.

    Returns pip string in form of "{tile_name}.{wire1}.{wire0}".
    """
    c.execute("SELECT name FROM phy_tile WHERE pkey = ?", (phy_tile_pkey, ))
    (tile_name, ) = c.fetchone()
    c.execute(
        "SELECT name FROM tile_type WHERE pkey = (SELECT tile_type_pkey FROM phy_tile WHERE pkey = ?)",
        (phy_tile_pkey, ))
    (tile_type, ) = c.fetchone()
    # The pip must start with "<tile_type>." before we swap the prefix.
    assert pip.startswith(tile_type), (pip, tile_name, tile_type)
    assert pip[len(tile_type)] == '.', (pip, tile_name)
    return tile_name + pip[len(tile_type):]
def expand_sink(conn, check_for_default, nets, net_map, source_to_sink_pip_map,
                sink_wire_pkey, allow_orphan_sinks):
    """ Attempt to expand a sink to its source.

    Tries, in order: an active pip upstream of the sink's node, a HARD0/
    HARD1 constant site pin, then a default/always pseudo-pip.  On success
    the sink's node is attached to the matching net; otherwise the sink is
    reported (or asserted) as an orphan.  Recurses upstream until a wire
    already present in net_map is found.
    """
    if sink_wire_pkey in net_map:
        # Already attached to a net, nothing to do.
        return
    c = conn.cursor()
    c.execute(
        "SELECT wire_in_tile_pkey, phy_tile_pkey FROM wire WHERE pkey = ?",
        (sink_wire_pkey, ))
    wire_in_tile_pkey, phy_tile_pkey = c.fetchone()
    c.execute("SELECT name FROM phy_tile WHERE pkey = ?", (phy_tile_pkey, ))
    (tile_name, ) = c.fetchone()
    c.execute("SELECT name FROM wire_in_tile WHERE pkey = ?",
              (wire_in_tile_pkey, ))
    (wire_name, ) = c.fetchone()

    # Check if the sink is in the PSS tile of Zynq
    is_pss = tile_name.startswith("PSS")

    if DEBUG:
        print('//', tile_name, wire_name, sink_wire_pkey)

    sink_node_pkey = get_node_pkey(conn, sink_wire_pkey)

    # Check if there is an upstream active pip on this node.
    for node_wire_pkey in get_wires_in_node(conn, sink_node_pkey):
        assert node_wire_pkey not in net_map
        if node_wire_pkey in source_to_sink_pip_map:
            upstream_sink_wire_pkey, pip = source_to_sink_pip_map[
                node_wire_pkey]
            # Recurse so the upstream wire gets attached to a net first.
            if upstream_sink_wire_pkey not in net_map:
                expand_sink(
                    conn=conn,
                    check_for_default=check_for_default,
                    nets=nets,
                    net_map=net_map,
                    source_to_sink_pip_map=source_to_sink_pip_map,
                    sink_wire_pkey=upstream_sink_wire_pkey,
                    allow_orphan_sinks=allow_orphan_sinks)
            if upstream_sink_wire_pkey in net_map:
                if DEBUG:
                    print(
                        '// {}/{} is connected to net via wire_pkey {}'.format(
                            tile_name, wire_name, upstream_sink_wire_pkey))
                for net in net_map[upstream_sink_wire_pkey]:
                    nets[net].add_node(
                        conn=conn,
                        net_map=net_map,
                        node_pkey=sink_node_pkey,
                        parent_node_pkey=get_node_pkey(
                            conn, upstream_sink_wire_pkey),
                        incoming_wire_pkey=node_wire_pkey,
                        pip=pip)
                return

    # There are no active pips upstream from this node, check if this is a
    # site pin connected to a HARD0 or HARD1 pin.  These are connected to the
    # global ZERO_NET or ONE_NET.
    c.execute("SELECT site_wire_pkey FROM node WHERE pkey = ?",
              (sink_node_pkey, ))
    site_wire_pkey = c.fetchone()[0]
    if site_wire_pkey is not None:
        # First see if a default/always ppip leads out of this wire.
        upstream_sink_wire_in_tile_pkey, pip = check_for_default(
            wire_in_tile_pkey)
        if upstream_sink_wire_in_tile_pkey is not None:
            if pip is not None:
                pip = replace_tile(c, pip, phy_tile_pkey)
            upstream_sink_wire_pkey = get_wire(
                conn, phy_tile_pkey, upstream_sink_wire_in_tile_pkey)
            if upstream_sink_wire_pkey in net_map:
                if DEBUG:
                    print(
                        '// {}/{} is connected to net via wire_pkey {}'.format(
                            tile_name, wire_name, upstream_sink_wire_pkey))
                for net in net_map[upstream_sink_wire_pkey]:
                    nets[net].add_node(
                        conn=conn,
                        net_map=net_map,
                        node_pkey=sink_node_pkey,
                        parent_node_pkey=get_node_pkey(
                            conn, upstream_sink_wire_pkey),
                        incoming_wire_pkey=sink_wire_pkey,
                        pip=pip,
                    )
                return

        c.execute(
            """
SELECT name, site_pin_pkey FROM wire_in_tile WHERE pkey = (
    SELECT wire_in_tile_pkey FROM wire WHERE pkey = ?);""",
            (site_wire_pkey, ))
        wire_name, site_pin_pkey = c.fetchone()
        assert site_pin_pkey is not None
        c.execute("SELECT name, direction FROM site_pin WHERE pkey = ?;"
                  "", (site_pin_pkey, ))
        site_pin, direction = c.fetchone()
        if direction == 'OUT':
            if DEBUG:
                print('// {}/{} is connected to const'.format(
                    tile_name, wire_name))
            if site_pin == 'HARD1':
                nets[ONE_NET].add_node(
                    conn, net_map, sink_node_pkey, parent_node_pkey=ONE_NET)
            elif site_pin == 'HARD0':
                nets[ZERO_NET].add_node(
                    conn, net_map, sink_node_pkey, parent_node_pkey=ZERO_NET)
            else:
                # Only constant generators are expected here; gather tile
                # name for a useful assertion message.
                c.execute(
                    """
SELECT name FROM phy_tile WHERE pkey = (SELECT phy_tile_pkey FROM wire WHERE pkey = ?)""",
                    (site_wire_pkey, ))
                tile = c.fetchone()[0]
                assert site_pin in ['HARD1', 'HARD0'], (sink_node_pkey, tile,
                                                        wire_name, site_pin)
            return

    # No active pips to move upstream, find a ppip upstream
    for node_wire_pkey in get_wires_in_node(conn, sink_node_pkey):
        c.execute(
            "SELECT phy_tile_pkey, wire_in_tile_pkey FROM wire WHERE pkey = ?;",
            (node_wire_pkey, ))
        phy_tile_pkey, wire_in_tile_pkey = c.fetchone()
        upstream_sink_wire_in_tile_pkey, pip = check_for_default(
            wire_in_tile_pkey)
        if upstream_sink_wire_in_tile_pkey is not None:
            if pip is not None:
                pip = replace_tile(c, pip, phy_tile_pkey)
            c.execute(
                "SELECT pkey FROM wire WHERE wire_in_tile_pkey = ? AND phy_tile_pkey = ?;",
                (
                    upstream_sink_wire_in_tile_pkey,
                    phy_tile_pkey,
                ))
            upstream_sink_wire_pkey = c.fetchone()[0]
            if upstream_sink_wire_pkey not in net_map:
                expand_sink(
                    conn=conn,
                    check_for_default=check_for_default,
                    nets=nets,
                    net_map=net_map,
                    source_to_sink_pip_map=source_to_sink_pip_map,
                    sink_wire_pkey=upstream_sink_wire_pkey,
                    allow_orphan_sinks=allow_orphan_sinks)
            if upstream_sink_wire_pkey in net_map:
                if DEBUG:
                    print(
                        '// {}/{} is connected to net via wire_pkey {}'.format(
                            tile_name, wire_name, upstream_sink_wire_pkey))
                for net in net_map[upstream_sink_wire_pkey]:
                    nets[net].add_node(
                        conn=conn,
                        net_map=net_map,
                        node_pkey=sink_node_pkey,
                        parent_node_pkey=get_node_pkey(
                            conn, upstream_sink_wire_pkey),
                        incoming_wire_pkey=node_wire_pkey,
                        pip=pip)
                return

    # For Zynq PSS tiles ignore unconnected sinks.  The fact that a sink is
    # connected or not is used to determine whether the PS7 is in use.
    if is_pss:
        return

    # There does not appear to be an upstream connection, handle it.
    if allow_orphan_sinks:
        print('// ERROR, failed to find source for node = {} ({}/{})'.format(
            sink_node_pkey, tile_name, wire_name))
    else:
        assert False, (sink_node_pkey, tile_name, wire_name, sink_wire_pkey)
def make_routes(db, conn, wire_pkey_to_wire, unrouted_sinks, unrouted_sources,
                active_pips, allow_orphan_sinks, shorted_nets, nets, net_map):
    """ Form nets (and their routes) based:

    unrouted_sinks - Set of wire_pkeys of sinks to BELs in the graph
    unrouted_sources - Set of wire_pkeys of sources from BELs in the graph
    active_pips - Known active pips, (sink wire_pkey, source wire_pkey).
    shorted_nets - Map of source_wire_pkey to sink_wire_pkey that represent
        shorted_nets
    nets / net_map - Output dictionaries, populated in place.

    Once nets are formed, yields wire names to their sources (which may be 0
    or 1 when connected to a constant net).
    """
    # Sanity: every endpoint must have a known wire name.
    for wire_pkey in unrouted_sinks:
        assert wire_pkey in wire_pkey_to_wire

    for wire_pkey in unrouted_sources:
        assert wire_pkey in wire_pkey_to_wire

    source_to_sink_pip_map = {}
    for sink_wire_pkey, source_wire_pkey, pip in active_pips:
        assert source_wire_pkey not in source_to_sink_pip_map
        source_to_sink_pip_map[source_wire_pkey] = sink_wire_pkey, pip

    # Shorted nets can be treated like an active pip.
    for source_wire_pkey, (sink_wire_pkey, pip) in shorted_nets.items():
        assert source_wire_pkey not in source_to_sink_pip_map
        source_to_sink_pip_map[source_wire_pkey] = sink_wire_pkey, pip

    # Every sink should belong to exactly 1 net
    # Every net should have exactly 1 source
    check_downstream_default = create_check_downstream_default(conn, db)

    def report_sources():
        # Debug helper: dump tile/wire names for every unrouted source.
        print('// Source wire pkeys:')
        c = conn.cursor()
        for wire_pkey in unrouted_sources:
            c.execute(
                "SELECT phy_tile_pkey, wire_in_tile_pkey FROM wire WHERE pkey = ?",
                (wire_pkey, ))
            phy_tile_pkey, wire_in_tile_pkey = c.fetchone()
            c.execute("SELECT name FROM wire_in_tile WHERE pkey = ?",
                      (wire_in_tile_pkey, ))
            (name, ) = c.fetchone()
            c.execute("SELECT name FROM phy_tile WHERE pkey = ?",
                      (phy_tile_pkey, ))
            tile = c.fetchone()[0]
            print('//', wire_pkey, tile, name)

    if DEBUG:
        report_sources()

    # Seed one net per BEL source, then propagate it downstream.
    for wire_pkey in unrouted_sources:
        nets[wire_pkey] = Net(wire_pkey)
        nets[wire_pkey].add_node(
            conn,
            net_map,
            get_node_pkey(conn, wire_pkey),
            parent_node_pkey=None)
        nets[wire_pkey].expand_source(conn, check_downstream_default, net_map)
    del check_downstream_default

    # Global constant nets for HARD0/HARD1 site pins.
    nets[ZERO_NET] = Net(ZERO_NET)
    nets[ONE_NET] = Net(ONE_NET)

    check_for_default = create_check_for_default(db, conn)
    for wire_pkey in unrouted_sinks:
        expand_sink(
            conn=conn,
            check_for_default=check_for_default,
            nets=nets,
            net_map=net_map,
            source_to_sink_pip_map=source_to_sink_pip_map,
            sink_wire_pkey=wire_pkey,
            allow_orphan_sinks=allow_orphan_sinks)
        if wire_pkey in net_map:
            for source_wire_pkey in net_map[wire_pkey]:
                if source_wire_pkey == ZERO_NET:
                    yield wire_pkey_to_wire[wire_pkey], 0
                elif source_wire_pkey == ONE_NET:
                    yield wire_pkey_to_wire[wire_pkey], 1
                else:
                    yield wire_pkey_to_wire[wire_pkey], wire_pkey_to_wire[
                        source_wire_pkey]
        else:
            # Sink not attached to any net; Zynq PSS sinks are expected to
            # be unconnected, everything else is reported when allowed.
            c = conn.cursor()
            c.execute(
                """
SELECT name FROM phy_tile WHERE pkey = (SELECT phy_tile_pkey FROM wire WHERE pkey = ?)""",
                (wire_pkey, ))
            (tile_name, ) = c.fetchone()
            is_pss = tile_name.startswith("PSS")
            if not is_pss and allow_orphan_sinks:
                print('// ERROR, source for sink wire {} not found'.format(
                    wire_pkey_to_wire[wire_pkey]))
def prune_antennas(conn, nets, unrouted_sinks):
    """ Prunes antenna routes from nets based on active sinks. """
    sink_nodes = {
        get_node_pkey(conn, wire_pkey)
        for wire_pkey in unrouted_sinks
    }
    for net in nets.values():
        net.prune_antennas(sink_nodes)
|
|
from collections import OrderedDict
from datetime import datetime
import json
from classytags.core import Options, Tag
from classytags.arguments import Argument
from django import template
from django.conf import settings
from cms.plugin_rendering import PluginContext
from smartsnippets.models import SmartSnippet, SmartSnippetPointer
from smartsnippets.widgets_pool import widget_pool
register = template.Library()
@register.simple_tag(takes_context=True)
def render_widget(context, var):
    """Instantiate the widget registered for *var* and render it."""
    widget_cls = widget_pool.get_widget(var.widget)
    return widget_cls(var).render(context['request'], context)
@register.simple_tag(takes_context=True)
def render_variable(context, var):
    """Render a snippet variable against the current request/context."""
    return var.render(context['request'], context)
@register.assignment_tag
def settings_value(what):
    """Resolve a dotted "<section>.<name>" key via the widget pool."""
    parts = what.split('.', 1)
    return widget_pool.get_settings(*parts)
@register.filter
def sortdict(dict_to_sort):
    """Return an OrderedDict of the input's items, sorted by key."""
    ordered_items = sorted(dict_to_sort.items())
    return OrderedDict(ordered_items)
@register.filter
def get_item(iterable, key):
    """Mapping-style lookup; sequences are looked up by integer index.

    Returns '' when the key is missing or the input is not iterable.
    """
    if hasattr(iterable, 'get'):
        mapping = iterable
    else:
        try:
            mapping = dict(enumerate(iterable))
        except TypeError:
            return ''
    return (mapping or {}).get(key, '')
@register.filter
def json_get_index(json_array, index):
    """Parse a JSON array string and return the element at *index*, else ''."""
    try:
        parsed = json.loads(json_array)
        return parsed[index]
    except (TypeError, IndexError, ValueError):
        return ''
@register.filter
def sortlist(list_to_sort):
    """Return the input's items as a new list in ascending order."""
    # sorted() already materializes a fresh list from any iterable, so the
    # previous list() wrapper was a redundant extra copy.
    return sorted(list_to_sort)
@register.filter
def times(number):
    # Iterable of 0..number-1 so templates can loop a fixed count:
    # {% for i in 5|times %}.
    return range(number)
@register.filter
def split(string_to_split, delimiter=None):
    """Split on *delimiter* (any whitespace when None); non-strings give []."""
    if hasattr(string_to_split, 'split'):
        return (string_to_split or '').split(delimiter)
    return []
@register.assignment_tag
def as_dict(**kwargs):
    """Expose the given keyword arguments to the template as a dict."""
    result = OrderedDict()
    result.update(kwargs)
    return result
@register.assignment_tag
def current_timestamp():
    """UTC timestamp string, e.g. "20240131_235959_123456"."""
    now = datetime.utcnow()
    return now.strftime("%Y%m%d_%H%M%S_%f")
@register.assignment_tag(takes_context=True)
def from_context(context, name, sep=None, empty=None):
    """Fetch context variables by name(s); *sep*-separated names give a list."""
    keys = name.split(sep or ',')
    values = [context.get(key, empty) for key in keys]
    if len(values) == 1:
        return values[0]
    return values
def _select_operator(args):
"""
The whole point of this function is to simulate multiple arguments
for django filters.
@args: string which represents the second argument of a filter; this is
split into two parts:
* first part will represent what operator will be returned
* second part represents the name of the key/attribute which will
be applied
@returns operator by the name specified in @args; (currently supporting:
'key' and 'attribute')
"""
args_parts = args.rsplit(',', 1)
func_by_args = lambda x: x
if len(args_parts) != 2:
return func_by_args
what, which = args_parts
if not which or what not in ('key', 'attribute'):
return func_by_args
if what == 'key':
return lambda x: x.get(which, None)
return lambda x: getattr(x, which, None)
@register.filter
def map_by(items, operator_args):
    """
    Applies map with specified operator over items.  Used to return a
    list of specific key/attribute name from a list of dictionaries/objects.

    @operator_args: string with format `<operator_name>,<operator_argument>`
        Example operator_args: 'key,image' or 'attribute,color'
        Currently only 'key' and 'attribute' are supported.
    """
    selector = _select_operator(operator_args)
    return map(selector, items)
@register.filter
def exclude_empty(items, operator_args=None):
    """
    Applies filter with specified operator over items.  Used to return
    items that are not considered empty.  This function can exclude items
    from the list that have one specific key/attribute empty by using
    operator_args.

    @items: strings, integers, dictionaries or custom objects.
    @operator_args: string with format `<operator_name>,<operator_argument>`.
        When operator_args is missing None will be used.
        Example operator_args: 'key,image' or 'attribute,color'
    """
    non_empty = filter(None, items)
    if not operator_args:
        return non_empty
    return filter(_select_operator(operator_args), non_empty)
def render_rendering_error(message, debug_info):
    """Return an inline <script> that logs *message* via console.warn.

    When DEBUG is on, *debug_info* (typically the full config) is appended
    so developers can inspect the offending component.
    """
    # NOTE(review): message/debug_info are interpolated into the JS string
    # unescaped; a double quote or </script> inside them would break (or
    # inject into) the page.  Consider json.dumps()-style escaping — confirm
    # with callers before changing the emitted markup.
    if settings.DEBUG:
        full_message = '{}. {}'.format(message, debug_info)
    else:
        full_message = message
    return ('<script type="text/javascript">console.warn("{}")</script>'.format(
        full_message))
class JSONSmartSnippet(Tag):
    """Template tag rendering a smart snippet described by a JSON config.

    Takes two context variable names: one holding the config dict (with
    ``metadata.snippet_id`` and optional ``variables``) and one holding the
    component's UUID, used only in error messages.  All failures degrade to
    an inline console.warn script instead of raising.
    """

    name = "jsonsmartsnippet"
    options = Options(
        Argument('config_key', resolve=False),
        Argument('id_key', resolve=False),
    )

    def render_tag(self, context, config_key, id_key):
        config = context.get(config_key) or {}
        component_id = context.get(id_key) or None
        metadata = config.get('metadata') or {}
        snippet_id = metadata.get('snippet_id', None)
        if not metadata or not snippet_id:
            return render_rendering_error(
                "Could not render smart snippet with UUID:{}".format(component_id),
                "Full config: {}".format(config))
        try:
            snippet = SmartSnippet.objects.get(id=snippet_id)
        except (SmartSnippet.DoesNotExist, ValueError):
            return render_rendering_error(
                "Could not render smart snippet with id:{}".format(snippet_id),
                "Full config: {}".format(config))
        # Build a transient pointer so the snippet can render without a
        # real placeholder/plugin row existing in the database.
        fake_pointer = SmartSnippetPointer(snippet=snippet)
        fake_pointer.placeholder_id = 0
        fake_pointer.id = 0
        fake_pointer.pk = 0
        plugin_context = PluginContext(context, fake_pointer, None)
        plugin_context.update(config.get('variables', {}))
        try:
            return snippet.render(plugin_context)
        except Exception as exc:
            # Rendering errors have very varied types.  Use str(exc): the
            # `.message` attribute does not exist on Python 3 exceptions, so
            # formatting it here raised AttributeError and masked the real
            # rendering error.
            return render_rendering_error(
                "Could not render smart snippet with id:{}. Rendering error.".format(snippet_id),
                "Full config: {}, error message:{}".format(config, str(exc)))


register.tag(JSONSmartSnippet)
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import json
import os
from enum import Enum
from typing import List, Optional, Tuple
from parlai.core.message import Message
from parlai.core.metrics import F1Metric
from parlai.core.mutators import EpisodeMutator, MessageMutator, register_mutator
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.teachers import DialogTeacher
from parlai.tasks.cmu_dog.build import build
from parlai.tasks.wizard_of_wikipedia.agents import (
RareWordF1Calculator,
TOKEN_KNOWLEDGE,
TOKEN_END_KNOWLEDGE,
)
from parlai.utils.data import DatatypeHelper
from parlai.utils.io import PathManager
from parlai.utils.logging import logger
from parlai.utils.typing import TShared
# Placeholder "context" for a conversation-opening turn with no real text.
SILENCE = '__SILENCE__'


class SplitType(Enum):
    """Which train/valid/test partitioning of CMU_DoG to use."""

    ORIGINAL = "original"  # split as originally released (contains duplicates)
    ORIGINAL_DEDUPED = "deduped"  # duplicates removed from the train set
    SEEN = "seen"  # test conversations about movies that appear in train
    UNSEEN = "unseen"  # test-only movies never seen during training
def _datapath(opt: Opt) -> str:
    """Ensure the dataset is built/downloaded and return its root directory."""
    build(opt)
    root = os.path.join(opt['datapath'], 'cmu_dog')
    return root
def _datafile(split: str, split_type: SplitType) -> str:
    """
    Returns the filename, e.g. train.json.
    """
    if split_type == SplitType.ORIGINAL:
        return f"{split}.json"
    if split_type == SplitType.SEEN:
        if 'test' in split:
            return "test_seen_split_seen_unseen.json"
        return f"{split}_split_seen_unseen.json"
    if split_type == SplitType.UNSEEN:
        if "test" not in split:
            logger.warning(
                "Trying to use a non-test dataset with split `unseen`. `unseen` "
                "only returns the unseen test set. Are you sure you didn't mean to "
                "use `seen` here?"
            )
        return "test_unseen_split_seen_unseen.json"
    # Fall-through handles SplitType.ORIGINAL_DEDUPED.
    return f"{split}_deduped.json"
def _all_split_datafiles(opt: Opt) -> List[str]:
    """List the datafiles across every fold for the configured split type."""
    split_type = SplitType(opt.get("cmu_dog_split_type"))
    folds = ['train', 'valid', 'test']
    if split_type not in {SplitType.SEEN, SplitType.UNSEEN}:
        return [_datafile(fold, split_type) for fold in folds]
    # For seen/unseen split, the full set of dialogs is split
    # across train, valid, test seen, and test unseen
    datafiles = [_datafile(fold, SplitType.SEEN) for fold in folds]
    datafiles.append(_datafile('test', SplitType.UNSEEN))
    return datafiles
def _collapse_multi_msgs(history, multi_msg_delim):
"""
This dataset allows for a single user to send multiple messages in a row.
Here we use a delimiter to represent this, like: "Hey!|Nice to meet you."
"""
collapsed = []
last_msg = history[0]
for msg in history[1:]:
if last_msg["uid"] == msg["uid"]:
last_msg["text"] = multi_msg_delim.join((last_msg["text"], msg["text"]))
else:
collapsed.append(last_msg)
last_msg = msg
# don't forget to add back the last message!
collapsed.append(last_msg)
return collapsed
def _article_section_to_text(
section, fact_delimiter: str, knowledge_keys: List[str] = None
) -> str:
"""
Example input:
{
"cast": [
"Ben Affleck as Batman",
"Henry Cavill as Superman",
],
"director": "Zack Snyder"
}
Example output:
"cast:Ben Affleck as Batman,Henry Cavill as Superman;director:Zack Snyder"
"""
if not section:
return section
if isinstance(section, str):
return section
texts = []
for k, v in section.items():
if knowledge_keys and k not in knowledge_keys:
continue
fact = f"{k}:"
if isinstance(v, str):
fact += v
else:
fact += ",".join(v)
texts.append(fact)
return fact_delimiter.join(texts)
def _build_rare_word_f1(opt: Opt) -> RareWordF1Calculator:
    """Build a rare-word F1 scorer from the message text of the whole corpus."""
    datapath = _datapath(opt)

    def _collect_convo_text(convo_data):
        # Flatten each conversation into one space-joined string of turns.
        convo_texts = []
        for conv in convo_data.values():
            # get all messages
            convo_texts.append(' '.join([m['text'] for m in conv['history']]))
        return convo_texts

    # use conversation data from all splits for consistency
    convos = []
    convo_files = _all_split_datafiles(opt)
    for fname in convo_files:
        with PathManager.open(os.path.join(datapath, f"conversations/{fname}")) as f:
            data = json.load(f)
        convos += _collect_convo_text(data)
    return RareWordF1Calculator(' '.join(convos), top_p=0.5)
class CMUDocumentGroundedConversationsTeacher(DialogTeacher):
    """
    CMU Document Grounded Conversations Dataset (aka CMU_DoG)

    Paper: https://arxiv.org/pdf/1809.07358.pdf
    Source: https://github.com/festvox/datasets-CMU_DoG

    Each conversation is emitted twice — once per speaker side — with the
    relevant wiki-article section attached as knowledge on every turn.
    """

    def __init__(self, opt: Opt, shared: TShared = None):
        opt = copy.deepcopy(opt)
        self.delimiter = opt.get('delimiter', '\n')
        split_type = SplitType(opt.get("cmu_dog_split_type"))
        if split_type == SplitType.ORIGINAL:
            logger.warning(
                "`original` split type contains duplicate conversations across train, "
                "valid, and test. See https://github.com/festvox/datasets-CMU_DoG/issues/2 "
                "for more detail."
            )
        opt['datafile'] = _datafile(
            split=DatatypeHelper.fold(opt['datatype']), split_type=split_type
        )
        super().__init__(opt, shared)
        # Rare-word F1 scorer is expensive to build; reuse via share().
        if shared:
            self.rare_word_f1 = shared['rare_word_f1']
        else:
            self.rare_word_f1 = _build_rare_word_f1(opt)

    @classmethod
    def add_cmdline_args(
        cls, parser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        super().add_cmdline_args(parser, partial_opt=partial_opt)
        cmu_dog = parser.add_argument_group("CMU Document Grounded Conversations")
        cmu_dog.add_argument(
            "--cmu-dog-rating",
            type=int,
            default=[1, 2, 3],
            choices=[1, 2, 3],
            nargs="+",
            help='The higher the number, the better quality the conversation. '
            'For each rating, the number of conversations is as follows: 1-1443, 2-2142, 3-527',
        )
        cmu_dog.add_argument(
            "--cmu-dog-only-with-knowledge",
            type=bool,
            default=True,
            help="Optionally train only the sides of the conversation that have access to knowledge.",
        )
        cmu_dog.add_argument(
            "--cmu-dog-multi-msg-delimiter",
            type=str,
            default=" ",
            help="When one agent is to send multiple messages in a row, they will be concatenated with this delimiter.",
        )
        cmu_dog.add_argument(
            "--cmu-dog-fact-delimiter",
            type=str,
            default=";",
            help="When a section of the knowledge contains multiple facts, they will be concatenated with this delimiter.",
        )
        cmu_dog.add_argument(
            "--cmu-dog-include-knowledge-keys",
            type=str,
            default='cast,critical_response,director,genre,introduction,movieName,rating,year',
            help="Comma-separated list of keys into the knowledge to include as the general conversational context",
        )
        cmu_dog.add_argument(
            "--cmu-dog-provide-movie-context",
            type=bool,
            default=True,
            help="Provide movie facts as the general conversational context",
        )
        cmu_dog.add_argument(
            "--cmu-dog-split-type",
            type=SplitType,
            default=SplitType.ORIGINAL_DEDUPED,
            choices=list(SplitType),
            help=(
                "`orginal`: train/valid/test split from the original release, "
                "`original_deduped`: duplicate conversations removed from train set, "
                "`seen`: refers to movies and is relative to training - `test seen` is conversations about movies that appear during training, "
                "`unseen`: contains conversations about movies that weren't seen in `train`/`valid`/`test seen`. "
                "When using seen/unseen, use `seen` for `train`/`valid`/`seen test` and `unseen` only for `unseen test`."
            ),
        )
        return parser

    def share(self):
        # Share the prebuilt rare-word F1 scorer with worker copies.
        shared = super().share()
        shared['rare_word_f1'] = self.rare_word_f1
        return shared

    def setup_data(self, datafile: str):
        datapath = _datapath(self.opt)
        with PathManager.open(os.path.join(datapath, f"conversations/{datafile}")) as f:
            data = json.load(f)
        with PathManager.open(os.path.join(datapath, "wiki_data.json")) as f:
            wiki_data = json.load(f)

        # Filter by rating
        data = {
            k: c for k, c in data.items() if c["rating"] in self.opt["cmu_dog_rating"]
        }

        def _can_see_info(turn, convo):
            # Sometimes only one participant has access to the article
            return turn["uid"] in convo["whoSawDoc"]

        num_eps = len(data)
        data = list(data.items())
        # loop through conversations; each conversation is visited twice
        # (start_idx 0 and 1), once with each participant as the student.
        for i in range(len(data) * 2):
            conv_idx = i % num_eps
            start_idx = i // num_eps
            _conv_id, conv_data = data[conv_idx]
            dialog = _collapse_multi_msgs(
                conv_data["history"], self.opt['cmu_dog_multi_msg_delimiter']
            )
            movie_article = wiki_data[str(conv_data["wikiDocumentIdx"])]

            if self.opt["cmu_dog_only_with_knowledge"] and not _can_see_info(
                dialog[start_idx], conv_data
            ):
                continue

            # loop through turns; the student speaks every other turn.
            for idx in range(start_idx, len(dialog), 2):
                label_turn = dialog[idx]
                label = label_turn["text"].strip()

                # The section displayed changes across the conversation
                doc_idx = str(label_turn["docIdx"])
                gold_knowledge = _article_section_to_text(
                    movie_article[doc_idx], self.opt['cmu_dog_fact_delimiter']
                )
                # Knowledge is only attached if this speaker saw the doc.
                section = (
                    movie_article[doc_idx]
                    if _can_see_info(label_turn, conv_data)
                    else None
                )
                section_text = _article_section_to_text(
                    section,
                    self.opt['cmu_dog_fact_delimiter'],
                    self.opt.get('cmu_dog_include_knowledge_keys').split(','),
                )

                # By default, start conversation with silence
                if idx == start_idx:
                    context = (
                        section_text
                        if self.opt['cmu_dog_provide_movie_context']
                        else SILENCE
                    )
                else:
                    context = dialog[idx - 1]["text"].strip()

                yield Message(
                    {
                        'text': context,
                        'labels': [label],
                        'available_knowledge_raw': section,
                        'available_knowledge_text': section_text,
                        'title': movie_article['0']['movieName'],
                        'checked_sentence': gold_knowledge,
                    }
                ), idx == start_idx

    def custom_evaluation(
        self,
        teacher_action: Message,
        labels: Optional[Tuple[str]],
        model_response: Message,
    ):
        # F1 of the response against the gold knowledge sentence.
        if 'text' in model_response and 'checked_sentence' in teacher_action:
            self.metrics.add(
                'knowledge_f1',
                F1Metric.compute(
                    model_response['text'], [teacher_action['checked_sentence']]
                ),
            )
        # F1 restricted to rare corpus words.
        if 'text' in model_response and labels:
            self.metrics.add(
                'rare_word_f1',
                self.rare_word_f1.compute(model_response['text'], labels),
            )
@register_mutator("prepend_knowledge_to_message")
class PrependKnowledgeToMessageMutator(MessageMutator):
    """Prefix each turn's text with its knowledge string, wrapped in the
    knowledge tokens; turns without knowledge pass through unchanged."""

    def message_mutation(self, message: Message) -> Message:
        knowledge_text = message.get('available_knowledge_text')
        if not knowledge_text:
            return message
        context = message.pop('text')
        knowledge = f'{TOKEN_KNOWLEDGE} {knowledge_text} {TOKEN_END_KNOWLEDGE}'
        delimiter = self.opt.get('delimiter', '\n')
        if context == SILENCE:
            message['text'] = knowledge
        else:
            message['text'] = f'{knowledge}{delimiter}{context}'
        return message
@register_mutator("knowledge_only_when_updated")
class KnowledgeWhenUpdatedMutator(EpisodeMutator):
    """Keep 'available_knowledge_text' only on turns where it changed,
    stripping it from turns that repeat the previous knowledge."""

    def episode_mutation(self, episode: List[Message]) -> List[Message]:
        last_knowledge = None
        for msg in episode:
            # pop with a default: a turn lacking the field previously raised
            # KeyError; treat it like "no knowledge", matching the lenient
            # .get() used by the prepend_knowledge_to_message mutator.
            knowledge = msg.pop('available_knowledge_text', None)
            if last_knowledge != knowledge:
                msg['available_knowledge_text'] = knowledge
                last_knowledge = knowledge
        return episode
class DefaultTeacher(CMUDocumentGroundedConversationsTeacher):
    # Default teacher alias for this task module.
    pass
|
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the search host command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestSearchHost(TestBrokerCommand):
def testorphaned(self):
    # Move a host into a sandbox, delete the sandbox's owner, and verify
    # that "search host --orphaned" finds it; managing it out again must
    # fail without --force and succeed with it.
    command = "manage --hostname unittest02.one-nyp.ms.com --sandbox orphantestuser/orphantestsandbox --force --skip_auto_compile"
    self.statustest(command.split(" "))
    command = ["del", "user", "--username", "orphantestuser"] + self.valid_just_sn
    self.noouttest(command)
    command = "search host --orphaned"
    out = self.commandtest(command.split(" "))
    self.matchoutput(out, "unittest02.one-nyp.ms.com", command)
    command = "manage --hostname unittest02.one-nyp.ms.com --domain unittest"
    err = self.badrequesttest(command.split(" "))
    self.matchoutput(err, "Unable to determine location of sandbox due to "
                     "missing user record. Please manually verify "
                     "there are no uncommitted and unpublished "
                     "changes and then re-run using --force.", command)
    command = "manage --hostname unittest02.one-nyp.ms.com --domain unittest --force"
    self.statustest(command.split(" "))
    # Restore the host to a compiled state for later tests.
    command = "reconfigure --hostname unittest02.one-nyp.ms.com"
    self.successtest(command.split(" "))
def testfqdnavailable(self):
    # Searching by an existing FQDN returns the host.
    command = "search host --hostname unittest00.one-nyp.ms.com"
    out = self.commandtest(command.split(" "))
    self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
def testfqdnunavailablerealdomain(self):
    # Unknown shortname in a real DNS domain: empty result, no error.
    command = "search host --hostname does-not-exist.one-nyp.ms.com"
    self.noouttest(command.split(" "))
def testfqdnunavailablefakedomain(self):
    # Non-existent DNS domain: the command reports "not found".
    command = "search host --hostname unittest00.does-not-exist.ms.com"
    out = self.notfoundtest(command.split(" "))
    self.matchoutput(out, "DNS Domain does-not-exist.ms.com", command)
def testfqdnavailablefull(self):
    # --fullinfo output includes the primary name and backing machine.
    command = "search host --hostname unittest00.one-nyp.ms.com --fullinfo"
    out = self.commandtest(command.split(" "))
    self.matchoutput(out, "Primary Name: unittest00.one-nyp.ms.com", command)
    self.matchoutput(out, "Machine: ut3c1n3", command)
def testmachineavailable(self):
command = "search host --machine ut3c1n3"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
def testmachineunavailable(self):
command = "search host --machine machine-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Machine machine-does-not-exist not found",
command)
def testdnsdomainavailable(self):
command = "search host --dns_domain aqd-unittest.ms.com"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest12.aqd-unittest.ms.com", command)
self.matchclean(out, "unittest00.one-nyp.ms.com", command)
def testdnsdomainunavailable(self):
command = "search host --dns_domain does-not-exist.ms.com"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "DNS Domain does-not-exist.ms.com not found",
command)
def testshortnameavailable(self):
command = "search host --shortname unittest00"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
def testshortnameunavailable(self):
command = "search host --shortname does-not-exist"
self.noouttest(command.split(" "))
def testdomainavailable(self):
command = "search host --domain unittest"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchoutput(out, "unittest02.one-nyp.ms.com", command)
def testdomainunavailable(self):
command = "search host --domain domain-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Domain domain-does-not-exist not found",
command)
def testsandboxavailable(self):
command = ["search_host", "--sandbox=%s/utsandbox" % self.user]
out = self.commandtest(command)
self.matchoutput(out, "server1.aqd-unittest.ms.com", command)
self.matchclean(out, "unittest00.one-nyp.ms.com", command)
def testsandboxowner(self):
command = ["search_host", "--sandbox_author=%s" % self.user]
out = self.commandtest(command)
self.matchoutput(out, "server1.aqd-unittest.ms.com", command)
self.matchclean(out, "unittest00.one-nyp.ms.com", command)
def testbranchavailable(self):
command = ["search_host", "--branch=utsandbox"]
out = self.commandtest(command)
self.matchoutput(out, "server1.aqd-unittest.ms.com", command)
self.matchclean(out, "unittest00.one-nyp.ms.com", command)
def testarchetypeavailable(self):
command = "search host --archetype aquilon"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchoutput(out, "unittest02.one-nyp.ms.com", command)
def testarchetypeunavailable(self):
command = "search host --archetype archetype-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Archetype archetype-does-not-exist not found",
command)
def testclusterarchetype(self):
command = "search host --cluster_archetype esx_cluster"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "evh1.aqd-unittest.ms.com", command)
def testclusterpersonality(self):
command = "search host --cluster_personality vulcan-10g-server-prod --cluster_archetype esx_cluster"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "evh1.aqd-unittest.ms.com", command)
def testbuildstatusavailable(self):
command = "search host --buildstatus ready"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest02.one-nyp.ms.com", command)
self.matchoutput(out, self.aurora_with_node, command)
self.matchoutput(out, self.aurora_without_node, command)
def testbuildstatusunavailable(self):
command = "search host --buildstatus status-does-not-exist"
out = self.badrequesttest(command.split(" "))
self.matchoutput(out, "Unknown host lifecycle 'status-does-not-exist'",
command)
def testipunavailable(self):
command = "search host --ip 199.98.16.4"
self.noouttest(command.split(" "))
def testipbad(self):
command = "search host --ip not-an-ip-address"
out = self.badrequesttest(command.split(" "))
self.matchoutput(out,
"Expected an IP address for --ip: "
"u'not-an-ip-address' does not appear to be an IPv4 or IPv6 address.",
command)
def testnetworkipavailable(self):
command = "search host --networkip %s" % self.net["unknown0"].ip
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchclean(out, "unittest00-e1.one-nyp.ms.com", command)
self.matchclean(out, "unittest00r.one-nyp.ms.com", command)
self.matchoutput(out, "unittest01.one-nyp.ms.com", command)
self.matchoutput(out, "unittest02.one-nyp.ms.com", command)
self.matchclean(out, "unittest02rsa.one-nyp.ms.com", command)
def testnetworkipunavailable(self):
command = "search host --networkip 199.98.16.0"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Network with address 199.98.16.0 not found",
command)
# utility methods for deprecation check
MAC_DEPR_STR = "The --mac option is deprecated. Please use search machine --mac instead."
SERIAL_DEPR_STR = "The --serial option is deprecated. Please use search machine --serial instead."
def testmacavailable(self):
def testfunc():
command = "search host --mac %s" % self.net["unknown0"].usable[2].mac
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.assertTruedeprecation(TestSearchHost.MAC_DEPR_STR, testfunc)
def testmacunavailable(self):
def testfunc():
command = "search host --mac 02:02:c7:62:10:04"
self.noouttest(command.split(" "))
self.assertTruedeprecation(TestSearchHost.MAC_DEPR_STR, testfunc)
def testall(self):
command = "show host --all"
out = self.commandtest(command.split(" "))
# This is a good sampling, but not the full output
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchclean(out, "unittest00r.one-nyp.ms.com", command)
self.matchclean(out, "unittest00-e1.one-nyp.ms.com", command)
self.matchoutput(out, "unittest01.one-nyp.ms.com", command)
self.matchoutput(out, "unittest02.one-nyp.ms.com", command)
self.matchclean(out, "unittest02rsa.one-nyp.ms.com", command)
self.matchoutput(out, self.aurora_with_node, command)
self.matchoutput(out, self.aurora_without_node, command)
self.matchoutput(out, "ut3gd1r01.aqd-unittest.ms.com", command)
self.matchclean(out, "ut3c1.aqd-unittest.ms.com", command)
def testpersonalityavailable(self):
command = "search host --personality compileserver"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest02.one-nyp.ms.com", command)
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
def testpersonalityavailable2(self):
command = "search host --archetype aquilon --personality compileserver"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest02.one-nyp.ms.com", command)
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchclean(out, "aquilon86.aqd-unittest.ms.com", command)
def testpersonalityunavailable(self):
command = "search host --archetype aquilon --personality personality-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Personality personality-does-not-exist, "
"archetype aquilon not found.", command)
def testpersonalityunavailable2(self):
command = "search host --personality personality-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Personality personality-does-not-exist "
"not found.", command)
def testosavailable(self):
osver = self.config.get("unittest", "linux_version_prev")
command = ["search_host", "--osname", "linux",
"--osversion", osver, "--archetype", "aquilon"]
out = self.commandtest(command)
self.matchoutput(out, "unittest02.one-nyp.ms.com", command)
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchclean(out, self.aurora_with_node, command)
def testosunavailable(self):
command = "search host --osname os-does-not-exist --osversion foo --archetype aquilon"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Operating System os-does-not-exist, "
"version foo, archetype aquilon not found.", command)
def testosnameonly(self):
command = "search host --osname linux"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest02.one-nyp.ms.com", command)
self.matchoutput(out, self.aurora_with_node, command)
self.matchoutput(out, self.aurora_without_node, command)
def testosversiononly(self):
command = "search host --osversion %s" % self.config.get("unittest",
"linux_version_prev")
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchoutput(out, self.aurora_with_node, command)
self.matchoutput(out, self.aurora_without_node, command)
def testserviceavailable(self):
command = "search host --service utsvc"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchoutput(out, "unittest02.one-nyp.ms.com", command)
def testosboundservice(self):
command = ["search_host", "--service", "ips"]
out = self.commandtest(command)
self.matchoutput(out, "aquilon69.aqd-unittest.ms.com", command)
self.matchclean(out, "unittest00", command)
self.matchclean(out, "unittest02", command)
def testserviceunavailable(self):
command = "search host --service service-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Service service-does-not-exist not found",
command)
def testserviceinstanceavailable(self):
command = "search host --service utsvc --instance utsi1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchclean(out, "unittest02.one-nyp.ms.com", command)
def testserviceinstanceunavailable(self):
command = "search host --service utsvc " \
"--instance service-instance-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out,
"Service Instance service-instance-does-not-exist, "
"service utsvc not found.",
command)
def testinstanceavailable(self):
command = "search host --instance utsi1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchclean(out, "unittest02.one-nyp.ms.com", command)
def testinstanceunavailable(self):
command = ["search_host", "--instance", "service-instance-does-not-exist"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Service Instance service-instance-does-not-exist "
"not found.",
command)
def testserverofservice00(self):
"""search host by server of service provided """
self.noouttest(["add_service", "--service", "foo"])
self.noouttest(["add", "service", "--service", "foo",
"--instance", "fooinst1"])
self.noouttest(["add", "service", "--service", "foo",
"--instance", "fooinst2"])
self.noouttest(["add_service", "--service", "baa"])
self.noouttest(["add", "service", "--service", "baa",
"--instance", "fooinst1"])
self.noouttest(["bind", "server",
"--hostname", "unittest00.one-nyp.ms.com",
"--service", "foo", "--instance", "fooinst1"])
self.noouttest(["bind", "server",
"--hostname", "unittest01.one-nyp.ms.com",
"--service", "foo", "--instance", "fooinst2"])
self.noouttest(["bind", "server",
"--hostname", "unittest02.one-nyp.ms.com",
"--service", "baa", "--instance", "fooinst1"])
command = "search host --server_of_service foo"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchoutput(out, "unittest01.one-nyp.ms.com", command)
def testserverofserviceunavailable(self):
""" search host for a service which is not defined """
command = "search host --server_of_service service-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Service service-does-not-exist not found",
command)
def testserverofservice01(self):
""" search host for a defined service and instance """
command = "search host --server_of_service foo " \
"--server_of_instance fooinst1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
def testserverofservice02(self):
""" search host for a defined instance """
command = "search host --server_of_instance fooinst1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchoutput(out, "unittest02.one-nyp.ms.com", command)
def testserverofservice03(self):
"""" search host for a defined service with undefined instance """
command = "search host --server_of_service foo " \
"--server_of_instance service-instance-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out,
"Service Instance service-instance-does-not-exist, "
"service foo not found",
command)
def testserverofservice04(self):
# Mix server and client side service criteria
command = "search host --server_of_service foo --instance utsi1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchclean(out, "unittest02.one-nyp.ms.com", command)
def testserverofinstanceunavailable(self):
command = ["search_host", "--server_of_instance",
"service-instance-does-not-exist"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Service Instance service-instance-does-not-exist "
"not found.",
command)
def testserverofservice05(self):
"""search host for a defined service but no servers assigned"""
self.noouttest(["unbind", "server",
"--hostname", "unittest01.one-nyp.ms.com",
"--service", "foo", "--instance", "fooinst2"])
self.noouttest(["search", "host",
"--server_of_service", "foo", "--server_of_instance", "fooinst2"])
self.noouttest(["search", "host", "--server_of_instance", "fooinst2"])
def testserverofservice06(self):
"""search host for a defined service but no servers assigned """
self.noouttest(["unbind", "server",
"--hostname", "unittest00.one-nyp.ms.com",
"--service", "foo", "--instance", "fooinst1"])
self.noouttest(["unbind", "server",
"--hostname", "unittest02.one-nyp.ms.com",
"--service", "baa", "--instance", "fooinst1"])
command = "search host --server_of_service foo"
self.noouttest(command.split(" "))
# cleanup
self.noouttest(["del", "service", "--service",
"foo", "--instance", "fooinst1"])
self.noouttest(["del", "service", "--service",
"foo", "--instance", "fooinst2"])
self.noouttest(["del", "service", "--service", "foo"])
self.noouttest(["del", "service", "--service",
"baa", "--instance", "fooinst1"])
self.noouttest(["del", "service", "--service", "baa"])
def testmodelavailable(self):
command = "search host --model dl360g9"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest15.aqd-unittest.ms.com", command)
self.matchoutput(out, "unittest16.aqd-unittest.ms.com", command)
self.matchoutput(out, "unittest17.aqd-unittest.ms.com", command)
def testmodellocation(self):
# Utilize two filters on the same table
command = "search host --model dl360g9 --building ut"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest15.aqd-unittest.ms.com", command)
self.matchoutput(out, "unittest16.aqd-unittest.ms.com", command)
self.matchoutput(out, "unittest17.aqd-unittest.ms.com", command)
def testmodelunavailable(self):
command = "search host --model model-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Model model-does-not-exist not found.",
command)
def testmodelvendorconflict(self):
command = "search host --model dl360g9 --vendor dell"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Model dl360g9, vendor dell not found.",
command)
def testmodelmachinetypeconflict(self):
command = "search host --model dl360g9 --machine_type virtual_machine"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Model dl360g9, model_type "
"virtual_machine not found.", command)
def testvendoravailable(self):
command = "search host --vendor dell"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest12.aqd-unittest.ms.com", command)
def testvendorunavailable(self):
command = "search host --vendor vendor-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Vendor vendor-does-not-exist not found",
command)
def testmachinetypeavailable(self):
command = "search host --machine_type rackmount"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest12.aqd-unittest.ms.com", command)
def testmachinetypeunavailable(self):
command = "search host --machine_type machine_type-does-not-exist"
out = self.badrequesttest(command.split(" "))
self.matchoutput(out, "Unknown machine type "
"machine_type-does-not-exist", command)
def testserialavailable(self):
def testfunc():
command = "search host --serial 99C5553"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest02.one-nyp.ms.com", command)
self.assertTruedeprecation(TestSearchHost.SERIAL_DEPR_STR, testfunc)
def testserialunavailable(self):
def testfunc():
command = "search host --serial serial-does-not-exist"
self.noouttest(command.split(" "))
self.assertTruedeprecation(TestSearchHost.SERIAL_DEPR_STR, testfunc)
def testlocationavailable(self):
command = "search host --rack ut3"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchoutput(out, "unittest01.one-nyp.ms.com", command)
self.matchoutput(out, "unittest02.one-nyp.ms.com", command)
self.matchoutput(out, "unittest12.aqd-unittest.ms.com", command)
def testlocationbuilding(self):
command = "search host --building ut"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchoutput(out, "unittest01.one-nyp.ms.com", command)
self.matchoutput(out, "aquilon61.aqd-unittest.ms.com", command)
self.matchoutput(out, "server1.aqd-unittest.ms.com", command)
self.matchoutput(out, "evh1.aqd-unittest.ms.com", command)
def testlocationcampus(self):
command = "search host --campus ny"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchoutput(out, "unittest01.one-nyp.ms.com", command)
self.matchoutput(out, "aquilon61.aqd-unittest.ms.com", command)
self.matchoutput(out, "server1.aqd-unittest.ms.com", command)
self.matchoutput(out, "evh1.aqd-unittest.ms.com", command)
def testlocationcomplex(self):
command = "search host --building ut --personality inventory"
out = self.commandtest(command.split(" "))
self.matchclean(out, "unittest00.one-nyp.ms.com", command)
self.matchoutput(out, "unittest12.aqd-unittest.ms.com", command)
self.matchoutput(out, "aquilon61.aqd-unittest.ms.com", command)
self.matchclean(out, "server1.aqd-unittest.ms.com", command)
self.matchclean(out, "evh1.aqd-unittest.ms.com", command)
def testlocationunavailable(self):
command = "search host --building bldg-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Building bldg-not-exist not found", command)
def testlocationtoolong(self):
command = "search host --building building-does-not-exist"
out = self.badrequesttest(command.split(" "))
self.matchoutput(out, "is more than the maximum 16 allowed.", command)
def testclusteravailable(self):
command = "search host --cluster utecl1"
out = self.commandtest(command.split(" "))
self.matchclean(out, "evh1.aqd-unittest.ms.com", command)
self.matchoutput(out, "evh2.aqd-unittest.ms.com", command)
self.matchoutput(out, "evh3.aqd-unittest.ms.com", command)
self.matchoutput(out, "evh4.aqd-unittest.ms.com", command)
def testclusterunavailable(self):
command = "search host --cluster cluster-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Cluster cluster-does-not-exist not found",
command)
def testclusterunavailablefull(self):
command = "search host --fullinfo --cluster cluster-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Cluster cluster-does-not-exist not found",
command)
def testguestoncluster(self):
command = "search host --guest_on_cluster utecl5"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "ivirt1.aqd-unittest.ms.com", command)
self.matchclean(out, "ivirt4.aqd-unittest.ms.com", command)
def testguestonshare(self):
command = "search host --guest_on_share utecl5_share"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "ivirt1.aqd-unittest.ms.com", command)
self.matchclean(out, "ivirt4.aqd-unittest.ms.com", command)
def testprotobuf(self):
command = "search host --hostname unittest02.one-nyp.ms.com --format proto"
self.protobuftest(command.split(" "), expect=1)
def testprotobuf_machine_netdev_mix(self):
# Pick network with Machine as well as NetworkDevices
ip = self.net["vmotion_net"].network_address
command = ["search_host", "--networkip", ip, "--format=proto"]
hosts = self.protobuftest(command)
def testip(self):
ip = self.net["unknown0"].usable[2]
command = ["search_host", "--ip=%s" % ip]
out = self.commandtest(command)
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchclean(out, "unittest02", command)
def testhostenvironment(self):
command = ["search_host", "--host_environment", "prod"]
out = self.commandtest(command)
self.matchoutput(out, "evh1.aqd-unittest.ms.com", command)
self.matchoutput(out, "filer1.ms.com", command)
self.matchoutput(out, "nyaqd1.ms.com", command)
self.matchoutput(out, "aqddesk1.msad.ms.com", command)
self.matchclean(out, "aquilon61.aqd-unittest.ms.com", command)
self.matchclean(out, "ivirt1.aqd-unittest.ms.com", command)
self.matchclean(out, "unittest00.one-nyp.ms.com", command)
def testhostenvironment2(self):
command = ["search_host", "--host_environment", "dev"]
out = self.commandtest(command)
self.matchoutput(out, "aquilon61.aqd-unittest.ms.com", command)
self.matchoutput(out, "ivirt1.aqd-unittest.ms.com", command)
self.matchoutput(out, "unittest00.one-nyp.ms.com", command)
self.matchclean(out, "evh1.aqd-unittest.ms.com", command)
self.matchclean(out, "filer1.ms.com", command)
self.matchclean(out, "nyaqd1.ms.com", command)
self.matchclean(out, "aqddesk1.msad.ms.com", command)
def testhostenvironment3(self):
command = ["search_host", "--host_environment", "qa"]
self.noouttest(command)
def testhostenvironmentbad(self):
command = ["search_host", "--host_environment", "no-such-environment"]
out = self.badrequesttest(command)
self.matchoutput(out, "Unknown host environment 'no-such-environment'",
command)
def testmetacluster(self):
command = "search host --metacluster utmc8"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "evh80.aqd-unittest.ms.com", command)
self.matchoutput(out, "evh81.aqd-unittest.ms.com", command)
def testnetworkenv(self):
command = ["search_host", "--network_environment", "utcolo"]
out = self.commandtest(command)
self.output_equals(out, """
unittest25.aqd-unittest.ms.com
""", command)
def testdomainmismatch(self):
ip = self.net["unknown0"].usable[34]
self.dsdb_expect_add("mismatch.one-nyp.ms.com", ip,
"eth0_mismatch",
primary="infra1.aqd-unittest.ms.com")
self.noouttest(["add_interface_address",
"--machine", "infra1.aqd-unittest.ms.com",
"--interface", "eth0", "--label", "mismatch",
"--fqdn", "mismatch.one-nyp.ms.com", "--ip", ip])
command = ["search_host", "--hostname", "infra1.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.output_equals(out, "infra1.aqd-unittest.ms.com", command)
command = ["search_host", "--hostname", "infra1.one-nyp.ms.com"]
out = self.commandtest(command)
self.output_equals(out, "infra1.one-nyp.ms.com", command)
command = ["search_host", "--short", "infra1"]
out = self.commandtest(command)
self.output_equals(out, """
infra1.aqd-unittest.ms.com
infra1.one-nyp.ms.com
""", command)
self.dsdb_expect_delete(ip)
self.noouttest(["del_interface_address",
"--machine", "infra1.aqd-unittest.ms.com",
"--interface", "eth0", "--label", "mismatch"])
self.dsdb_verify()
if __name__ == '__main__':
    # Allow running this module's tests directly with a verbose text runner.
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(TestSearchHost)
    unittest.TextTestRunner(verbosity=5).run(suite)
|
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import configparser
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from textwrap import dedent
from typing import Dict
import pytest
from pants.option.config import Config, TomlSerializer
from pants.testutil.test_base import TestBase
from pants.util.contextutil import temporary_file
from pants.util.enums import match
from pants.util.ordered_set import OrderedSet
class ConfigFormat(Enum):
    """The on-disk syntaxes a test config fixture can be rendered in."""

    ini = "ini"
    toml = "toml"
# MyPy doesn't like mixing dataclasses with ABC, which is indeed weird to do, but it works.
@dataclass(frozen=True)  # type: ignore[misc]
class ConfigFile(ABC):
    """A config-file fixture: raw text plus the values the parser is
    expected to extract from it, for a given ConfigFormat."""

    # The format the fixture's content is written in.
    format: ConfigFormat

    @property
    def suffix(self) -> str:
        """Filename suffix conventionally used for this format."""
        return match(self.format, {ConfigFormat.ini: ".ini", ConfigFormat.toml: ".toml"})

    @property
    @abstractmethod
    def content(self) -> str:
        """The raw text of the config file."""
        pass

    @property
    @abstractmethod
    def default_values(self) -> Dict:
        """Expected option values of the [DEFAULT] section."""
        pass

    @property
    @abstractmethod
    def expected_options(self) -> Dict:
        """Expected mapping of section name -> {option: value}."""
        pass
class File1(ConfigFile):
    """Fixture covering [DEFAULT] values, %()s interpolation, list
    add/remove edits, nested sections, and section-level overrides."""

    @property
    def content(self) -> str:
        return match(
            self.format,
            {
                ConfigFormat.ini: dedent(
                    """
                    [DEFAULT]
                    name: foo
                    answer: 42
                    scale: 1.2
                    path: /a/b/%(answer)s
                    embed: %(path)s::foo
                    disclaimer:
                      Let it be known
                      that.
                    [a]
                    list: [1, 2, 3, %(answer)s]
                    list2: +[7, 8, 9]
                    list3: -["x", "y", "z"]
                    [b]
                    preempt: True
                    [b.nested]
                    dict: {
                        'a': 1,
                        'b': %(answer)s,
                        'c': ['%(answer)s', '%(answer)s'],
                        }
                    [b.nested.nested-again]
                    movie: inception
                    [c]
                    name: overridden_from_default
                    interpolated_from_section: %(name)s is interpolated
                    recursively_interpolated_from_section: %(interpolated_from_section)s (again)
                    """
                ),
                ConfigFormat.toml: dedent(
                    """
                    [DEFAULT]
                    name = "foo"
                    answer = 42
                    scale = 1.2
                    path = "/a/b/%(answer)s"
                    embed = "%(path)s::foo"
                    disclaimer = '''
                    Let it be known
                    that.'''
                    [a]
                    # TODO: once TOML releases its new version with support for heterogenous lists, we should be
                    # able to rewrite this to `[1, 2, 3, "%(answer)s"`. See
                    # https://github.com/toml-lang/toml/issues/665.
                    list = ["1", "2", "3", "%(answer)s"]
                    list2.add = [7, 8, 9]
                    list3.remove = ["x", "y", "z"]
                    [b]
                    preempt = true
                    [b.nested]
                    dict = '''
                    {
                     "a": 1,
                     "b": "%(answer)s",
                     "c": ["%(answer)s", "%(answer)s"],
                    }'''
                    [b.nested.nested-again]
                    movie = "inception"
                    [c]
                    name = "overridden_from_default"
                    interpolated_from_section = "%(name)s is interpolated"
                    recursively_interpolated_from_section = "%(interpolated_from_section)s (again)"
                    """
                ),
            },
        )

    @property
    def default_values(self):
        # Interpolation has been resolved in these expected values.
        common_values = {
            "name": "foo",
            "answer": "42",
            "scale": "1.2",
            "path": "/a/b/42",
            "embed": "/a/b/42::foo",
        }
        return match(
            self.format,
            {
                # The INI parser keeps the leading newline of the multi-line
                # "disclaimer" value; the TOML form does not.
                ConfigFormat.ini: {**common_values, "disclaimer": "\nLet it be known\nthat."},
                ConfigFormat.toml: {**common_values, "disclaimer": "Let it be known\nthat."},
            },
        )

    @property
    def expected_options(self) -> Dict:
        ini_values = {
            "a": {"list": "[1, 2, 3, 42]", "list2": "+[7, 8, 9]", "list3": '-["x", "y", "z"]',},
            "b": {"preempt": "True",},
            "b.nested": {"dict": "{\n'a': 1,\n'b': 42,\n'c': ['42', '42'],\n}"},
            "b.nested.nested-again": {"movie": "inception",},
            "c": {
                "name": "overridden_from_default",
                "interpolated_from_section": "overridden_from_default is interpolated",
                "recursively_interpolated_from_section": "overridden_from_default is interpolated (again)",
            },
        }
        return match(
            self.format,
            {
                ConfigFormat.ini: ini_values,
                # TOML differs for "a" (string-typed list) and "b.nested"
                # (different whitespace preserved in the multiline string).
                ConfigFormat.toml: {
                    **ini_values,
                    "a": {**ini_values["a"], "list": '["1", "2", "3", "42"]',},
                    "b.nested": {"dict": '{\n "a": 1,\n "b": "42",\n "c": ["42", "42"],\n}'},
                },
            },
        )
class File2(ConfigFile):
    """Fixture with no [DEFAULT] section: plain options, list edits, an
    empty section, and a child section whose parent defines no values."""

    @property
    def content(self) -> str:
        return match(
            self.format,
            {
                ConfigFormat.ini: dedent(
                    """
                    [a]
                    fast: True
                    [b]
                    preempt: False
                    [d]
                    list: +[0, 1],-[8, 9]
                    [empty_section]
                    [p.child]
                    no_values_in_parent: True
                    """
                ),
                ConfigFormat.toml: dedent(
                    """
                    [a]
                    fast = true
                    [b]
                    preempt = false
                    [d]
                    list.add = [0, 1]
                    list.remove = [8, 9]
                    [empty_section]
                    [p.child]
                    no_values_in_parent = true
                    """
                ),
            },
        )

    @property
    def default_values(self) -> Dict:
        # This file defines no [DEFAULT] section.
        return {}

    @property
    def expected_options(self) -> Dict:
        return {
            "a": {"fast": "True"},
            "b": {"preempt": "False"},
            "d": {"list": "+[0, 1],-[8, 9]"},
            "empty_section": {},
            "p.child": {"no_values_in_parent": "True"},
        }
class ConfigBaseTest(TestBase):
    # Prevent pytest from collecting this base class directly; concrete
    # subclasses select the actual config format to exercise.
    __test__ = False

    # Subclasses should define these
    file1 = File1(ConfigFormat.ini)
    file2 = File2(ConfigFormat.ini)
def _setup_config(self) -> Config:
with temporary_file(binary_mode=False, suffix=self.file1.suffix) as config1, temporary_file(
binary_mode=False, suffix=self.file2.suffix
) as config2:
config1.write(self.file1.content)
config1.close()
config2.write(self.file2.content)
config2.close()
parsed_config = Config.load(
config_paths=[config1.name, config2.name],
seed_values={"buildroot": self.build_root},
)
assert [config1.name, config2.name] == parsed_config.sources()
return parsed_config
    def setUp(self) -> None:
        """Parse both fixture files and precompute the expected merged view."""
        self.config = self._setup_config()
        # Seed values that Config injects into every file's DEFAULT section.
        self.default_seed_values = Config._determine_seed_values(
            seed_values={"buildroot": self.build_root},
        )
        # file2's options shadow file1's; section "a" appears in both files,
        # so its options are combined (file1's entries merged on top).
        self.expected_combined_values = {
            **self.file1.expected_options,
            **self.file2.expected_options,
            "a": {**self.file2.expected_options["a"], **self.file1.expected_options["a"],},
        }
def test_sections(self) -> None:
expected_sections = list(
OrderedSet([*self.file2.expected_options.keys(), *self.file1.expected_options.keys()])
)
assert self.config.sections() == expected_sections
for section in expected_sections:
assert self.config.has_section(section) is True
# We should only look at explicitly defined sections. For example, if `cache.java` is defined
# but `cache` is not, then `cache` should not be included in the sections.
assert self.config.has_section("p") is False
    def test_has_option(self) -> None:
        """DEFAULT values propagate only to sections of the file defining them."""
        # Check has all DEFAULT values
        for default_option in (*self.default_seed_values.keys(), *self.file1.default_values.keys()):
            assert self.config.has_option(section="DEFAULT", option=default_option) is True
        # Check every explicitly defined section has its options + the seed defaults
        for section, options in self.expected_combined_values.items():
            for option in (*options, *self.default_seed_values):
                assert self.config.has_option(section=section, option=option) is True
        # Check every section for file1 also has file1's DEFAULT values
        for section in self.file1.expected_options:
            for option in self.file1.default_values:
                assert self.config.has_option(section=section, option=option) is True
        # Check that file1's DEFAULT values don't apply to sections only defined in file2
        sections_only_in_file2 = set(self.file2.expected_options.keys()) - set(
            self.file1.expected_options.keys()
        )
        for section in sections_only_in_file2:
            for option in self.file1.default_values:
                assert self.config.has_option(section=section, option=option) is False
        # Check that non-existent options are False
        nonexistent_options = {
            "DEFAULT": "fake",
            "a": "fake",
            "b": "fast",
        }
        for section, option in nonexistent_options.items():
            assert self.config.has_option(section=section, option=option) is False
        # Check that sections aren't misclassified as options
        nested_sections = {
            "b": "nested",
            "b.nested": "nested-again",
            "p": "child",
        }
        for parent_section, child_section in nested_sections.items():
            assert self.config.has_option(section=parent_section, option=child_section) is False
    def test_list_all_options(self) -> None:
        """Every option of every section is enumerable per config file."""
        # This is used in `options_bootstrapper.py` to validate that every option is recognized.
        # NOTE(review): configs() appears to order the most recently loaded
        # file first — hence file1 sits at index 1; confirm against
        # Config.configs().
        file1_config = self.config.configs()[1]
        file2_config = self.config.configs()[0]
        for section, options in self.file1.expected_options.items():
            expected = list(options.keys())
            # DEFAULT-provided options are appended after the section's own,
            # skipping any option the section already defines itself.
            expected.extend(
                default_option
                for default_option in (
                    *self.default_seed_values.keys(),
                    *self.file1.default_values.keys(),
                )
                if default_option not in expected
            )
            assert file1_config.values.options(section=section) == expected
        for section, options in self.file2.expected_options.items():
            assert file2_config.values.options(section=section) == [
                *options.keys(),
                *self.default_seed_values.keys(),
            ]
        # Check non-existent section
        for config in file1_config, file2_config:
            with pytest.raises(configparser.NoSectionError):
                config.values.options("fake")
def test_default_values(self) -> None:
    # This is used in `options_bootstrapper.py` to ignore default values when validating options.
    cfg_for_file1 = self.config.configs()[1]
    cfg_for_file2 = self.config.configs()[0]
    # NB: string interpolation should only happen when calling _ConfigValues.get_value(). The
    # values for _ConfigValues.defaults are not yet interpolated.
    raw_file1_defaults = dict(self.file1.default_values)
    raw_file1_defaults["path"] = "/a/b/%(answer)s"
    raw_file1_defaults["embed"] = "%(path)s::foo"
    expected_file1_defaults = dict(self.default_seed_values)
    expected_file1_defaults.update(raw_file1_defaults)
    assert cfg_for_file1.values.defaults == expected_file1_defaults
    assert cfg_for_file2.values.defaults == self.default_seed_values
def test_get(self) -> None:
    # Check the DEFAULT section: seed values merged with file1's explicit defaults.
    merged_defaults = dict(self.default_seed_values)
    merged_defaults.update(self.file1.default_values)
    for opt, val in merged_defaults.items():
        assert self.config.get(section="DEFAULT", option=opt) == val
    # Check the combined values, including that each section has the default seed values.
    for sect, section_values in self.expected_combined_values.items():
        expected = dict(section_values)
        expected.update(self.default_seed_values)
        for opt, val in expected.items():
            assert self.config.get(section=sect, option=opt) == val
    # Check that each section from file1 also has file1's default values, unless that section
    # explicitly overrides the default.
    for sect, section_values in self.file1.expected_options.items():
        for opt, fallback in self.file1.default_values.items():
            assert self.config.get(section=sect, option=opt) == section_values.get(opt, fallback)

    def verify_fallbacks(fallback: str) -> None:
        # Missing options return None, or the caller-supplied default.
        assert self.config.get(section="c", option="fast") is None
        assert self.config.get(section="c", option="preempt", default=None) is None
        assert self.config.get(section="c", option="jake", default=fallback) == fallback

    verify_fallbacks("")
    verify_fallbacks("42")
def test_empty(self) -> None:
    # Loading zero files must yield a config with no sections, sources, or options.
    empty_cfg = Config.load([])
    assert empty_cfg.has_section("DEFAULT") is False
    assert empty_cfg.has_option(section="DEFAULT", option="name") is False
    assert empty_cfg.sections() == []
    assert empty_cfg.sources() == []
class ConfigIniTest(ConfigBaseTest):
    """Run the shared config test suite with both files in INI format."""

    __test__ = True
    file1 = File1(ConfigFormat.ini)
    file2 = File2(ConfigFormat.ini)
class ConfigTomlTest(ConfigBaseTest):
    """Run the shared config test suite with both files in TOML format."""

    __test__ = True
    file1 = File1(ConfigFormat.toml)
    file2 = File2(ConfigFormat.toml)
class ConfigIniWithTomlTest(ConfigBaseTest):
    """Run the shared config test suite mixing an INI file1 with a TOML file2."""

    __test__ = True
    file1 = File1(ConfigFormat.ini)
    file2 = File2(ConfigFormat.toml)
class ConfigTomlWithIniTest(ConfigBaseTest):
    """Run the shared config test suite mixing a TOML file1 with an INI file2."""

    __test__ = True
    file1 = File1(ConfigFormat.toml)
    file2 = File2(ConfigFormat.ini)
def test_toml_serializer() -> None:
    # TomlSerializer.normalize() must stringify nested maps and expand
    # dotted section names into nested tables.
    source: Dict = {
        "GLOBAL": {
            "truthy": True,
            "falsy": False,
            "int": 0,
            "float": 0.0,
            "word": "hello there",
            "listy": ["a", "b", "c"],
            "map": {"a": 0, "b": 1},
        },
        "cache.java": {"o": ""},
        "inception.nested.nested-again.one-more": {"o": ""},
    }
    expected_global = dict(source["GLOBAL"])
    expected_global["map"] = "{'a': 0, 'b': 1}"
    expected = {
        "GLOBAL": expected_global,
        "cache": {"java": {"o": ""}},
        "inception": {"nested": {"nested-again": {"one-more": {"o": ""}}}},
    }
    assert TomlSerializer(source).normalize() == expected
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration for the `projects` app.

    Converts `ProjectPlan.latitude` / `ProjectPlan.longitude` from
    FloatField to DecimalField(max_digits=21, decimal_places=18), and
    back again on rollback.
    """

    def forwards(self, orm):
        # Changing field 'ProjectPlan.latitude'
        db.alter_column(u'projects_projectplan', 'latitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=21, decimal_places=18))

        # Changing field 'ProjectPlan.longitude'
        db.alter_column(u'projects_projectplan', 'longitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=21, decimal_places=18))

    def backwards(self, orm):
        # Changing field 'ProjectPlan.latitude'
        db.alter_column(u'projects_projectplan', 'latitude', self.gf('django.db.models.fields.FloatField')(null=True))

        # Changing field 'ProjectPlan.longitude'
        db.alter_column(u'projects_projectplan', 'longitude', self.gf('django.db.models.fields.FloatField')(null=True))

    # Frozen ORM snapshot generated by South; it records the full model state
    # at this point in migration history. Do not edit by hand.
    models = {
        u'accounts.bluebottleuser': {
            'Meta': {'object_name': 'BlueBottleUser'},
            'about': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'}),
            'availability': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
            'available_time': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'birthdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'contribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'newsletter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'picture': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'share_money': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'share_time_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'user_type': ('django.db.models.fields.CharField', [], {'default': "'person'", 'max_length': '25'}),
            'username': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'why': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'geo.country': {
            'Meta': {'ordering': "['name']", 'object_name': 'Country'},
            'alpha2_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
            'alpha3_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'numeric_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
            'oda_recipient': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'subregion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.SubRegion']"})
        },
        u'geo.region': {
            'Meta': {'ordering': "['name']", 'object_name': 'Region'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'numeric_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'})
        },
        u'geo.subregion': {
            'Meta': {'ordering': "['name']", 'object_name': 'SubRegion'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'numeric_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
            'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Region']"})
        },
        u'organizations.organization': {
            'Meta': {'ordering': "['name']", 'object_name': 'Organization'},
            'account_bank_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'account_bank_country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Country']", 'null': 'True', 'blank': 'True'}),
            'account_bank_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'account_bic': ('django_iban.fields.SWIFTBICField', [], {'max_length': '11', 'blank': 'True'}),
            'account_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'account_iban': ('django_iban.fields.IBANField', [], {'max_length': '34', 'blank': 'True'}),
            'account_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'account_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'account_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'facebook': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'legal_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'partner_organizations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'registration': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'skype': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
            'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        u'projects.partnerorganization': {
            'Meta': {'object_name': 'PartnerOrganization'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'projects.project': {
            'Meta': {'ordering': "['title']", 'object_name': 'Project'},
            'coach': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team_member'", 'null': 'True', 'to': u"orm['accounts.BlueBottleUser']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['accounts.BlueBottleUser']"}),
            'partner_organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.PartnerOrganization']", 'null': 'True', 'blank': 'True'}),
            'phase': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'projects.projectambassador': {
            'Meta': {'object_name': 'ProjectAmbassador'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project_plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectPlan']"})
        },
        u'projects.projectbudgetline': {
            'Meta': {'object_name': 'ProjectBudgetLine'},
            'amount': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '10'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project_plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectPlan']"})
        },
        u'projects.projectcampaign': {
            'Meta': {'object_name': 'ProjectCampaign'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': "'10'"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'money_asked': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'project': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['projects.Project']", 'unique': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
        },
        u'projects.projectpitch': {
            'Meta': {'object_name': 'ProjectPitch'},
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Country']", 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '21', 'decimal_places': '18', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '21', 'decimal_places': '18', 'blank': 'True'}),
            'need': ('django.db.models.fields.CharField', [], {'default': "'both'", 'max_length': '20', 'null': 'True'}),
            'pitch': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'project': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['projects.Project']", 'unique': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectTheme']", 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'video_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '100', 'blank': 'True'})
        },
        u'projects.projectplan': {
            'Meta': {'object_name': 'ProjectPlan'},
            'campaign': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Country']", 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'effects': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'for_who': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'future': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'blank': 'True'}),
            'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '21', 'decimal_places': '18', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '21', 'decimal_places': '18', 'blank': 'True'}),
            'money_needed': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'need': ('django.db.models.fields.CharField', [], {'default': "'both'", 'max_length': '20', 'null': 'True'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organizations.Organization']", 'null': 'True', 'blank': 'True'}),
            'pitch': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'project': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['projects.Project']", 'unique': 'True'}),
            'reach': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectTheme']", 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'video_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
        },
        u'projects.projectresult': {
            'Meta': {'object_name': 'ProjectResult'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['projects.Project']", 'unique': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        u'projects.projecttheme': {
            'Meta': {'ordering': "['name']", 'object_name': 'ProjectTheme'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
        }
    }

    # South runs this migration only for the listed app(s).
    complete_apps = ['projects']
|
|
# plugin.py vi:ts=4:sw=4:expandtab:
#
# Splat plugins
# Authors:
# Landon Fuller <landonf@opendarwin.org>
# Will Barton <wbb4@opendarwin.org>
#
# Copyright (c) 2005 Three Rings Design, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright owner nor the names of contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import splat
from splat import SplatError
import types
import logging
import ldap
import time
# Exceptions
class SplatPluginError(SplatError):
    """Raised when a Splat helper plugin cannot be loaded or fails to run."""
    pass
class HelperController(object):
    """Drives one Splat helper plugin.

    Locates the concrete Helper subclass in a module, tracks per-group
    option contexts, and runs the helper against LDAP entries that match
    the configured search. Python-2-only constructs (`raise E, msg`,
    `except E, e`, `dict.has_key`, `types.ClassType`) have been replaced
    with forms valid on both Python 2.6+ and Python 3.
    """

    def __init__(self, name, module, interval, searchBase, searchFilter, requireGroup, helperOptions):
        """
        Initialize Splat Helper from module
        @param name: Unique caller-assigned name. Helpers with non-unique names will overwrite previous additions when added to a daemon context.
        @param module: Module containing a single Helper subclass. Any other subclasses of Helper will be ignored.
        @param interval: Run interval in seconds. An interval of '0' will cause the helper to be run only once.
        @param searchBase: LDAP Search base
        @param searchFilter: LDAP Search filter
        @param requireGroup: Require any returned entries to be a member of a group supplied by addGroup().
        @param helperOptions: Dictionary of helper-specific options
        @raise SplatPluginError: If the module contains no concrete Helper subclass.
        """
        self.helperClass = None
        self.name = name
        self.interval = interval
        self.searchFilter = searchFilter
        self.searchBase = searchBase
        self.requireGroup = requireGroup
        # Time of last successful run (epoch seconds); 0 means "never run".
        self._lastRun = 0
        # Per-group option contexts; self.groups preserves insertion order
        # because groups must be tested in the order they were added.
        self.groupsCtx = {}
        self.groups = []

        # Import the helper module and pick the first concrete Helper subclass.
        p = __import__(module, globals(), locals(), ['__file__'])
        for attr in dir(p):
            obj = getattr(p, attr)
            # Helper derives from object, so any usable subclass is a
            # new-style class and isinstance(obj, type) is sufficient
            # (the Python-2-only types.ClassType check was dropped).
            if isinstance(obj, type) and issubclass(obj, Helper):
                # Skip the abstract base class itself
                if obj is not Helper:
                    self.helperClass = obj
                    break

        if self.helperClass is None:
            raise SplatPluginError("Helper module %s not found" % module)

        # Get the list of required attributes
        self.searchAttr = self.helperClass.attributes()
        # If None, request all user attributes (LDAP_ALL_USER_ATTRIBUTES)
        if self.searchAttr is None:
            self.searchAttr = ('*', )
        # Always retrieve the modifyTimestamp operational attribute, too.
        self.searchAttr = self.searchAttr + ('modifyTimestamp',)
        self.defaultContext = self.helperClass.parseOptions(helperOptions)

    def addGroup(self, groupFilter, helperOptions=None):
        """
        Add a new group filter.
        @param groupFilter: Instance of ldaputils.client.GroupFilter
        @param helperOptions: Group-specific helper options. Optional; falls
            back to the controller's default context when omitted.
        """
        if helperOptions:
            self.groupsCtx[groupFilter] = self.helperClass.parseOptions(helperOptions)
        else:
            self.groupsCtx[groupFilter] = self.defaultContext

        # Groups must be tested in the order they are added
        self.groups.append(groupFilter)

    def work(self, ldapConnection):
        """
        Find matching LDAP entries and fire off the helper.
        @param ldapConnection: Connected LDAP client used for all searches.
        """
        logger = logging.getLogger(splat.LOG_NAME)
        failure = False

        # Save the start time, used to determine the last successful run
        startTime = int(time.time())

        # TODO LDAP scope support
        entries = ldapConnection.search(self.searchBase, ldap.SCOPE_SUBTREE, self.searchFilter, self.searchAttr)

        # Instantiate a plugin instance
        plugin = self.helperClass()

        # Iterate over the results
        for entry in entries:
            context = None
            entryModified = False
            groupModified = False

            # Find the group helper instance, if any
            for group in self.groups:
                if group.isMember(ldapConnection, entry.dn):
                    context = self.groupsCtx[group]
                    # Get the modifyTimestamp of this group. If the group has
                    # been modified, this entry might have just been added to
                    # the group, in which case we want to treat the entry as
                    # modified.
                    groupEntry = ldapConnection.search(group.baseDN, group.scope, group.filter, ('modifyTimestamp',))[0]
                    if 'modifyTimestamp' in groupEntry.attributes:
                        groupModTime = groupEntry.getModTime()
                        if groupModTime is not None and groupModTime >= self._lastRun:
                            groupModified = True
                    # If no timestamp, assume the group has been modified.
                    else:
                        groupModified = True
                    # Break to outer loop
                    break

            if context is None and self.requireGroup == False:
                context = self.defaultContext
            elif context is None and self.requireGroup == True:
                # Move on, empty handed
                logger.debug("DN %s matched zero groups and requireGroup is enabled for helper %s" % (entry.dn, self.name))
                continue

            # Check if our entry has been modified
            if 'modifyTimestamp' in entry.attributes:
                entryModTime = entry.getModTime()
                # Go on to next entry if the modifyTimetamp is malformed
                if entryModTime is None:
                    continue
                if entryModTime >= self._lastRun:
                    entryModified = True
            # If there is no modifyTimestamp, just say entry has been modified
            else:
                entryModified = True

            try:
                plugin.work(context, entry, entryModified or groupModified)
            except splat.SplatError as e:
                failure = True
                logger.error("Helper invocation for '%s' failed with error: %s" % (self.name, e))

        # Let the plugin clean itself up
        try:
            plugin.finish()
        except splat.SplatError as e:
            failure = True
            logger.error("Helper finish invocation for '%s' failed with error: %s" % (self.name, e))

        # If the entire run was successful, update the last-run timestamp.
        #
        # We use the start time, rather than the current time, as modifications
        # may occur between when the run starts, and when the run finishes.
        if not failure:
            self._lastRun = startTime
class Helper(object):
    """
    Abstract class for Splat helper plugins.

    The Python-2-only ``raise Exc, "msg"`` statements have been replaced
    with the call form, which is valid on both Python 2 and Python 3.
    """

    @classmethod
    def attributes(cls):
        """
        Return required LDAP attributes. Return None to have
        all available attributes returned.
        @raise NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError(
            "This method is not implemented in this abstract class")

    @classmethod
    def _parseBooleanOption(cls, option):
        """
        Case insensitively convert a string option 'true' or 'false' to
        the appropriate boolean, and throw an exception if the option
        isn't either of those strings.
        @param option: String option value to convert.
        @raise SplatPluginError: If the option is neither 'true' nor 'false'.
        """
        # Normalize once instead of lowercasing in each branch.
        normalized = option.lower()
        if normalized == 'true':
            return True
        elif normalized == 'false':
            return False
        else:
            raise SplatPluginError(
                "Invalid value for option %s specified; must be set to true or false." % option)

    @classmethod
    def parseOptions(cls, options):
        """
        Parse the supplied options dict and return
        an opaque configuration context.
        @raise NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError(
            "This method is not implemented in this abstract class")

    def work(self, context, ldapEntry, modified):
        """
        Do something useful with the supplied ldapEntry.
        @raise NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError(
            "This method is not implemented in this abstract class")

    def finish(self):
        """
        Called after all data has been passed to the work() method.
        Override this to implement any necessary post-processing; eg,
        flushing modifications to disk, etc.
        """
        pass
|
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from collections import OrderedDict
def latex_matrix_string(mean, title,
                        row_labels, col_labels,
                        best_bold_row=True, best_bold_column=False):
    """
    Latex Matrix String Generator.

    Builds and returns a complete LaTeX ``table`` environment (as one
    string) whose body cells are the entries of *mean*.

    Example
    -------
    mean = [[1, 6, 5, 7], [12, 4, 6, 13], [9, 8, 7, 10]]

    print(latex_matrix_string(mean, "Testing Testing", [
        "row1", "row2", "row3"], [
        "col1", "col2", "col3", "col4"]))

    Parameters
    ----------
    mean : array of float array
        An array of float arrays containing mean values
    title : string
        Title string of the table
    row_labels : string array
        Array of strings for row names
    col_labels : string arrays
        Array of strings for column names
    best_bold_row : boolean
        If set to true, the minimum mean entry in each row will
        be set to bold.
    best_bold_column :
        If set to true, the minimum mean entry in each column will
        be set to bold.

    Returns
    -------
    string
        LaTeX source for the whole table.

    NOTE(review): when *best_bold_row* and *best_bold_column* are both
    True, neither bolding branch below fires (each requires the other
    flag to be False), so no cell is bolded -- confirm this is intended.
    """
    matrix_string = '''\hline
'''
    for i, row in enumerate(mean):
        # Column spec is rebuilt on every row; only the value from the final
        # iteration is used below, so all rows are assumed equal-length.
        column_string = '''{ |c'''
        matrix_string = matrix_string + \
            "\\textbf{" + row_labels[i] + "}& "  # length of row labels and number of rows must be equal
        for j, cell in enumerate(row):
            column_string = column_string + '''|c'''
            # Cell separator, or the row terminator for the last column.
            ending_string = ''' & ''' if j < len(row) - 1 else ''' \\\ \hline'''
            if best_bold_row and cell == min(
                    row) and best_bold_column == False:
                # Bold the per-row minimum.
                matrix_string = matrix_string + \
                    "$\mathbf{" + str(cell) + "}$" + ending_string
            elif best_bold_column and cell == min([a[j] for a in mean]) and best_bold_row == False:
                # Bold the per-column minimum.
                matrix_string = matrix_string + \
                    "$\mathbf{" + str(cell) + "}$" + ending_string
            else:
                matrix_string = matrix_string + "$" + \
                    str(cell) + "$" + ending_string
    column_string = column_string + '''| }'''
    # Header row: one bold label per column.
    column_label = ""
    for column in col_labels:
        column_label = column_label + "&\\textbf{" + column + "}"
    # Assemble the surrounding table/tabular environment around the body.
    latex_string1 = '''\\begin{table}[ht]
\centering
\\begin{tabular}
''' + column_string + '''
\hline
''' + column_label + "\\\ [0.1ex]" + '''
''' + matrix_string + '''\end{tabular}
\\\[-1.5ex]
\caption{''' + title + '''}
\end{table}'''
    return latex_string1
def latex_matrix_string_mean_error(mean, error, title,
                                   row_labels, col_labels,
                                   best_bold_row=True, best_bold_column=False):
    """Build a LaTeX table of ``mean \\pm error`` values.

    Example
    -------
    mean = [[1, 6, 5, 7], [12, 4, 6, 13], [9, 8, 7, 10]]
    error = [[2, 6, 1, 5], [4, 8, 2, 3], [1, 4, 8, 2]]
    print(latex_matrix_string_mean_error(mean, error, "Testing Testing",
                                         ["row1", "row2", "row3"],
                                         ["col1", "col2", "col3", "col4"]))

    Parameters
    ----------
    mean : list of list of float
        Matrix of mean values, one inner list per table row.
    error : list of list of float
        Matrix of error values, same shape as ``mean``.
    title : str
        Caption of the table.
    row_labels : list of str
        One label per row of ``mean``.
    col_labels : list of str
        One label per column of ``mean``.
    best_bold_row : bool
        If True (and ``best_bold_column`` is False), the minimum mean entry
        of each row is set in bold.
    best_bold_column : bool
        If True (and ``best_bold_row`` is False), the minimum mean entry of
        each column is set in bold.

    Returns
    -------
    str
        The complete LaTeX table source.

    Raises
    ------
    ValueError
        If ``row_labels`` or ``error`` does not match ``mean`` in length.
    """
    if len(row_labels) != len(mean):
        raise ValueError("row_labels and mean must have the same length")
    if len(error) != len(mean):
        raise ValueError("error and mean must have the same length")
    # Column minima (over the means) are only needed for column bolding.
    column_mins = [min(col) for col in zip(*mean)] if best_bold_column else None
    body_rows = []
    for i, row in enumerate(mean):
        # Hoist the row minimum out of the inner loop (was recomputed per cell).
        row_min = min(row) if (best_bold_row and row) else None
        cells = []
        for j, cell in enumerate(row):
            # Bolding applies only when exactly one of the two flags is set,
            # matching the original behaviour; it keys on the mean value only.
            bold = ((best_bold_row and not best_bold_column and cell == row_min)
                    or (best_bold_column and not best_bold_row
                        and cell == column_mins[j]))
            text = "%s \\pm %s" % (cell, error[i][j])
            cells.append("$\\mathbf{%s}$" % text if bold else "$%s$" % text)
        body_rows.append("\\textbf{%s}& %s \\\\ \\hline" % (row_labels[i], " & ".join(cells)))
    # One extra 'c' column holds the row labels.
    n_cols = len(mean[0]) if mean else len(col_labels)
    column_spec = "{ |c" + "|c" * n_cols + "| }"
    header = "".join("&\\textbf{%s}" % col for col in col_labels)
    return ("\\begin{table}[ht]\n"
            "\\centering\n"
            "\\begin{tabular}\n"
            + column_spec + "\n"
            "\\hline\n"
            + header + "\\\\ [0.1ex]\n"
            "\\hline\n"
            + "\n".join(body_rows) + "\n"
            "\\end{tabular}\n"
            "\\\\[-1.5ex]\n"
            "\\caption{" + title + "}\n"
            "\\end{table}")
# def plot_over_iterations(x, methods, metric="mean", labels=None, linewidth=3, fontsize_label=25,
# x_label="Error", y_label="Number of iterations", log_y=False, log_x=False,
# title="", legend_loc=1, percentiles=(5, 95), colors=None, plot_legend=True):
# """
# Plots performance over iterations of different methods .
#
# Example:
# ----------------------------
# x = np.array([[1, 2, 3, 4, 5], [1, 2, 3, 4]])
# method_1 = np.array([[1,4,5,2], [3,4,3,6] , [2,5,5,8]])
# method_2 = np.array([[8,7,5,9], [7,3,9,1] , [3,2,9,4]])
# method_3 = np.array([[10,13,9,11], [9,12,10,10] , [11,14,18,6]])
# methods = [method_1, method_2, method_3]
# plot = plot_median(x,methods)
# plot.show()
#
# Parameters:
# ----------
# x : numpy array
# For each curve, contains the x-coordinates. Each entry
# corresponds to one method.
# methods : list of numpy arrays
# A list of numpy arrays of methods. Each method contains a numpy array
# of several run of that corresponding method.
# method_names: List of Strings
# A list of names for the methods
#
# Returns
# -------
# plt : object
# Plot Object
# """
#
# if labels is None:
# labels = ["Method-%d" % i for i in range(len(methods))]
#
# styles = ["o", "D", "s", ">", "<", "^", "v", "*", "*", ".", ",", "1", "2", "3", "4"]
#
# if colors is None:
# colors = ["blue", "green", "purple", "darkorange", "red",
# "palevioletred", "lightseagreen", "brown", "black",
# "firebrick", "cyan", "gold", "slategray"]
#
# for index, method in enumerate(methods):
# style = styles[index % len(styles)]
# color = colors[index % len(colors)]
# if metric == "median":
# plt.plot(x[index], np.median(method, axis=0), label=labels[index], linewidth=linewidth, marker=style,
# color=color)
# elif metric == "mean":
# plt.plot(x[index], np.mean(method, axis=0), label=labels[index], linewidth=linewidth, marker=style,
# color=color)
# elif metric == "median_percentiles":
# plt.plot(x[index], np.median(method, axis=0), label=labels[index], linewidth=linewidth, marker=style,
# color=color)
# plt.fill_between(x[index], np.percentile(method, percentiles[0], axis=0),
# np.percentile(method, percentiles[1], axis=0),
# color=color, alpha=0.2)
# elif metric == "mean_std":
# plt.errorbar(x[index], np.mean(method, axis=0), yerr=np.std(method, axis=0),
# label=labels[index], linewidth=linewidth, marker=style, color=color)
# else:
# raise ValueError("Metric does not exist!")
#
# if plot_legend:
# plt.legend(loc=legend_loc, fancybox=True, framealpha=1, frameon=True, fontsize=fontsize_label)
#
# plt.xlabel(x_label, fontsize=fontsize_label)
# plt.ylabel(y_label, fontsize=fontsize_label)
# plt.grid(True, which='both', ls="-")
# if log_y:
# plt.yscale("log")
# if log_x:
# plt.xscale("log")
#
# plt.title(title, fontsize=fontsize_label)
# return plt
def fill_trajectory(performance_list, time_list, replace_nan=np.nan):
    """Merge several (performance, time) trajectories onto one common time axis.

    Each trajectory is turned into a pandas Series indexed by its times; the
    series are aligned on the union of all time points and gaps are filled by
    carrying the last observed performance forward.

    Parameters
    ----------
    performance_list : sequence of sequences of float
        One performance curve per run.
    time_list : sequence of sequences
        Matching time stamps for each curve; lengths must agree pairwise.
    replace_nan : float
        Value substituted for entries that remain NaN after forward-filling
        (i.e. before a curve's first observation).  Defaults to NaN, in which
        case such entries trigger the ValueError below.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        2-d performance array (time x run) and the merged, sorted time axis.

    Raises
    ------
    ValueError
        If a curve and its times differ in length, or non-finite values
        remain after filling.
    """
    frame_dict = OrderedDict()
    # enumerate replaces the original np.arange counter.
    for idx, (performance, times) in enumerate(zip(performance_list, time_list)):
        if len(performance) != len(times):
            raise ValueError("(%d) Array length mismatch: %d != %d" %
                             (idx, len(performance), len(times)))
        frame_dict[str(idx)] = pd.Series(data=performance, index=times)
    merged = pd.DataFrame(frame_dict)
    # Carry each run's last value forward over time points it did not report.
    merged = merged.ffill()
    # DataFrame.get_values() was removed in pandas 1.0; to_numpy() is the
    # supported replacement.
    performance = merged.to_numpy()
    time_ = merged.index.values
    performance[np.isnan(performance)] = replace_nan
    if not np.isfinite(performance).all():
        raise ValueError("\nCould not merge lists, because \n"
                         "\t(a) one list is empty?\n"
                         "\t(b) the lists do not start with the same times and"
                         " replace_nan is not set?\n"
                         "\t(c) any other reason.")
    return performance, time_
def plot_optimization_trajectories(times, methods, metric="mean", labels=None, linewidth=3, fontsize=20,
                                   x_label="Error", y_label="Time", log_y=False, log_x=False, plot_legend=True,
                                   title="", legend_loc=0, percentiles=(5, 95), colors=None, std_scale=1,
                                   markersize=15, markevery=1):
    """Plot performance over time for several optimization methods.

    Example:
    ----------------------------
    x = np.array([[1, 2, 3, 4, 5], [1, 2, 3, 4]])
    method_1 = np.array([[1,4,5,2], [3,4,3,6] , [2,5,5,8]])
    method_2 = np.array([[8,7,5,9], [7,3,9,1] , [3,2,9,4]])
    method_3 = np.array([[10,13,9,11], [9,12,10,10] , [11,14,18,6]])
    methods = [method_1, method_2, method_3]
    plot = plot_optimization_trajectories(x, methods)
    plot.show()

    Parameters
    ----------
    times : sequence of array-like
        x-coordinates, one entry per method.
    methods : sequence of 2-d arrays
        One array per method; each row is one run of that method.
    metric : str
        "median", "mean", "median_percentiles", "mean_std" or "mean_sem".
        The latter three additionally draw a shaded uncertainty band.
    labels : sequence of str, optional
        Legend labels; defaults to "Method-<i>".

    Returns
    -------
    The matplotlib.pyplot module with the curves drawn on the current figure.

    Raises
    ------
    ValueError
        If ``metric`` is not one of the supported names.
    """
    # Fail fast on a bad metric (the original only raised inside the loop,
    # so an unknown metric passed silently when `methods` was empty).
    if metric not in ("median", "mean", "median_percentiles", "mean_std", "mean_sem"):
        raise ValueError("Metric does not exist!")
    if labels is None:
        labels = ["Method-%d" % i for i in range(len(methods))]
    styles = ["o", "D", "s", ">", "<", "^", "v", "*", "*", "."]
    if colors is None:
        colors = ["blue", "green", "purple", "darkorange",
                  "red", "palevioletred", "lightseagreen", "brown", "black"]
    for index, method in enumerate(methods):
        style = styles[index % len(styles)]
        color = colors[index % len(colors)]
        x = times[index]
        # Central curve: median for the median-based metrics, mean otherwise.
        if metric in ("median", "median_percentiles"):
            center = np.median(method, axis=0)
        else:
            center = np.mean(method, axis=0)
        # Single plot call replaces the five near-identical copies the
        # original carried in each metric branch.
        plt.plot(x, center, label=labels[index], linewidth=linewidth, marker=style,
                 color=color, markevery=markevery, markersize=markersize,
                 markeredgecolor=color, markerfacecolor='none', markeredgewidth=5)
        # Optional uncertainty band around the central curve.
        if metric == "median_percentiles":
            plt.fill_between(x, np.percentile(method, percentiles[0], axis=0),
                             np.percentile(method, percentiles[1], axis=0),
                             color=color, alpha=0.2)
        elif metric == "mean_std":
            band = std_scale * np.std(method, axis=0)
            plt.fill_between(x, center + band, center - band,
                             linewidth=linewidth, color=color, alpha=0.2)
        elif metric == "mean_sem":
            band = std_scale * stats.sem(method, axis=0)
            plt.fill_between(x, center + band, center - band,
                             linewidth=linewidth, color=color, alpha=0.2)
    if plot_legend:
        plt.legend(loc=legend_loc, fancybox=True, framealpha=1, frameon=True, fontsize=fontsize)
    plt.xlabel(x_label, fontsize=fontsize)
    plt.ylabel(y_label, fontsize=fontsize)
    if log_y:
        plt.yscale("log")
    if log_x:
        plt.xscale("log")
    plt.grid(True, which='both', ls="-", alpha=1)
    plt.title(title, fontsize=fontsize)
    return plt
|
|
import datetime
import json
import mock
from sqlalchemy.exc import IntegrityError
from werkzeug.exceptions import BadRequest, Forbidden
from rdr_service import config, singletons
from rdr_service.api_util import open_cloud_file
from rdr_service.clock import FakeClock
from rdr_service.code_constants import (
COHORT_1_REVIEW_CONSENT_NO_CODE,
COHORT_1_REVIEW_CONSENT_YES_CODE,
CONSENT_COPE_DEFERRED_CODE,
CONSENT_COPE_NO_CODE,
CONSENT_COPE_YES_CODE,
GENDER_IDENTITY_QUESTION_CODE,
COPE_VACCINE_MINUTE_1_MODULE_CODE,
COPE_VACCINE_MINUTE_2_MODULE_CODE,
COPE_VACCINE_MINUTE_3_MODULE_CODE,
PMI_SKIP_CODE,
PPI_SYSTEM,
PRIMARY_CONSENT_UPDATE_QUESTION_CODE,
THE_BASICS_PPI_MODULE, COPE_VACCINE_MINUTE_4_MODULE_CODE,
BASICS_PROFILE_UPDATE_QUESTION_CODES
)
from rdr_service.concepts import Concept
from rdr_service.dao.code_dao import CodeDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.dao.questionnaire_dao import QuestionnaireDao
from rdr_service.dao.questionnaire_response_dao import (
QuestionnaireResponseAnswerDao,
QuestionnaireResponseDao,
_raise_if_gcloud_file_missing,
)
from rdr_service.domain_model.response import Answer
from rdr_service.model.code import Code, CodeType
from rdr_service.model.participant import Participant
from rdr_service.model.participant_summary import ParticipantSummary
from rdr_service.model.questionnaire import Questionnaire, QuestionnaireConcept, QuestionnaireQuestion
from rdr_service.model.questionnaire_response import QuestionnaireResponse, QuestionnaireResponseAnswer, \
QuestionnaireResponseStatus
from rdr_service.model.resource_data import ResourceData
from rdr_service.participant_enums import GenderIdentity, QuestionnaireStatus, WithdrawalStatus, ParticipantCohort
from tests import test_data
from tests.test_data import (
consent_code,
cope_consent_code,
email_code,
first_name_code,
last_name_code,
login_phone_number_code,
to_client_participant_id
)
from tests.helpers.unittest_base import BaseTestCase, PDRGeneratorTestMixin
# Fixed timestamps used with FakeClock so created/authored/lastModified
# values in the tests are deterministic.
TIME = datetime.datetime(2016, 1, 1)
TIME_2 = datetime.datetime(2016, 1, 2)
TIME_3 = datetime.datetime(2016, 1, 3)
TIME_4 = datetime.datetime(2016, 1, 4)
# Passed to asdict(follow=ANSWERS) (see check_response) so the child answer
# rows are included in the dict comparison.
ANSWERS = {"answers": {}}
# Minimal questionnaire / questionnaire-response resource payloads (JSON strings).
QUESTIONNAIRE_RESOURCE = '{"x": "y", "version": "V1"}'
QUESTIONNAIRE_RESOURCE_2 = '{"x": "z", "version": "V1"}'
QUESTIONNAIRE_RESPONSE_RESOURCE = '{"resourceType": "QuestionnaireResponse", "a": "b"}'
QUESTIONNAIRE_RESPONSE_RESOURCE_2 = '{"resourceType": "QuestionnaireResponse", "a": "c"}'
QUESTIONNAIRE_RESPONSE_RESOURCE_3 = '{"resourceType": "QuestionnaireResponse", "a": "d"}'
# Fake bucket mapping used to override config.CONSENT_PDF_BUCKET in setUp.
_FAKE_BUCKET = {"example": "ptc-uploads-unit-testing"}
# Value registered as config.DNA_PROGRAM_CONSENT_UPDATE_CODE in setUp.
DNA_PROGRAM_CONSENT_UPDATE_CODE_VALUE = 'new_unknown_consent_update_code'
def with_id(resource, id_):
    """Return the JSON string *resource* with its "id" field set to str(id_)."""
    return json.dumps({**json.loads(resource), "id": str(id_)})
class QuestionnaireResponseDaoTest(PDRGeneratorTestMixin, BaseTestCase):
def setUp(self):
    """Create the DAOs and the code/question fixtures shared by all tests."""
    super().setUp()
    # DAOs under test plus the supporting participant/questionnaire DAOs.
    self.code_dao = CodeDao()
    self.participant_dao = ParticipantDao()
    self.questionnaire_dao = QuestionnaireDao()
    self.questionnaire_response_dao = QuestionnaireResponseDao()
    self.questionnaire_response_answer_dao = QuestionnaireResponseAnswerDao()
    self.participant_summary_dao = ParticipantSummaryDao()
    # Question code 1 is the gender-identity question; code 2 is an
    # unmapped question in a non-PPI system.
    self.CODE_1 = Code(
        codeId=1,
        system=PPI_SYSTEM,
        value=GENDER_IDENTITY_QUESTION_CODE,
        display="c",
        topic="d",
        codeType=CodeType.QUESTION,
        mapped=True,
    )
    self.CODE_2 = Code(codeId=2, system="a", value="x", display="y", codeType=CodeType.QUESTION, mapped=False)
    # Answer codes 3-6 hang off the two questions above via parentId.
    self.CODE_3 = Code(codeId=3, system="a", value="c", codeType=CodeType.ANSWER, mapped=True, parentId=1)
    self.CODE_4 = Code(codeId=4, system="a", value="d", codeType=CodeType.ANSWER, mapped=True, parentId=2)
    self.CODE_5 = Code(codeId=5, system="a", value="e", codeType=CodeType.ANSWER, mapped=False, parentId=1)
    self.CODE_6 = Code(codeId=6, system="a", value="f", codeType=CodeType.ANSWER, mapped=True, parentId=1)
    # Module code for TheBasics; CONCEPT_1 ties questionnaires to it.
    self.MODULE_CODE_7 = Code(
        codeId=7, system=PPI_SYSTEM, value=THE_BASICS_PPI_MODULE, codeType=CodeType.MODULE, mapped=True
    )
    self.CONCEPT_1 = QuestionnaireConcept(codeId=7)
    self.CODE_1_QUESTION_1 = QuestionnaireQuestion(linkId="a", codeId=1, repeats=False)
    self.CODE_2_QUESTION = QuestionnaireQuestion(linkId="d", codeId=2, repeats=True)
    # Same code as question 1
    self.CODE_1_QUESTION_2 = QuestionnaireQuestion(linkId="x", codeId=1, repeats=False)
    self.skip_code = Code(codeId=8, system=PPI_SYSTEM, value=PMI_SKIP_CODE, mapped=True, codeType=CodeType.ANSWER)
    # COPE consent answer codes: yes / no / deferred.
    self.cope_consent_yes = Code(codeId=9, system=PPI_SYSTEM, value=CONSENT_COPE_YES_CODE, mapped=True,
    codeType=CodeType.ANSWER)
    self.cope_consent_no = Code(codeId=10, system=PPI_SYSTEM, value=CONSENT_COPE_NO_CODE, mapped=True,
    codeType=CodeType.ANSWER)
    self.cope_consent_deferred = Code(codeId=11, system=PPI_SYSTEM, value=CONSENT_COPE_DEFERRED_CODE, mapped=True,
    codeType=CodeType.ANSWER)
    # Point consent-PDF lookups at a fake bucket and register the DNA-program
    # consent-update module code used by some tests.
    config.override_setting(config.CONSENT_PDF_BUCKET, _FAKE_BUCKET)
    config.override_setting(config.DNA_PROGRAM_CONSENT_UPDATE_CODE, DNA_PROGRAM_CONSENT_UPDATE_CODE_VALUE)
    self.dna_program_consent_update_code = Code(system=PPI_SYSTEM, mapped=True, codeType=CodeType.MODULE,
    value=DNA_PROGRAM_CONSENT_UPDATE_CODE_VALUE)
    self.consent_update_question_code = self.data_generator.create_database_code(
        codeId=25,
        value=PRIMARY_CONSENT_UPDATE_QUESTION_CODE
    )
    # Map COPE questionnaire form ids to survey months.
    config.override_setting(config.COPE_FORM_ID_MAP, {
        'Form_13,Cope': 'May',
        'June': 'June',
        'Form_1': 'July',
        'NovCope': 'Nov'
    })
def _setup_questionnaire(self):
    """Insert a questionnaire carrying the TheBasics and consent concepts plus the contact questions."""
    questionnaire = Questionnaire(resource=QUESTIONNAIRE_RESOURCE)
    questionnaire.concepts.extend([
        self.CONCEPT_1,
        QuestionnaireConcept(codeId=self.consent_code_id),
    ])
    questionnaire.questions.extend([
        self.CODE_1_QUESTION_1,
        self.CODE_2_QUESTION,
        self.FN_QUESTION,
        self.LN_QUESTION,
        self.EMAIL_QUESTION,
        self.LOGIN_PHONE_NUMBER_QUESTION,
    ])
    return self.questionnaire_dao.insert(questionnaire)
def insert_codes(self):
    """Insert the code fixtures and build the question/answer objects the tests share."""
    # Codes with fixed codeIds, created in setUp; insert in the same order
    # as before.
    static_codes = (
        self.CODE_1, self.CODE_2, self.CODE_3, self.CODE_4, self.CODE_5,
        self.CODE_6, self.MODULE_CODE_7, self.skip_code,
        self.cope_consent_yes, self.cope_consent_no, self.cope_consent_deferred,
    )
    for code in static_codes:
        self.code_dao.insert(code)
    # Codes built by test_data helpers; keep their assigned codeIds.
    self.consent_code_id = self.code_dao.insert(consent_code()).codeId
    self.first_name_code_id = self.code_dao.insert(first_name_code()).codeId
    self.last_name_code_id = self.code_dao.insert(last_name_code()).codeId
    self.email_code_id = self.code_dao.insert(email_code()).codeId
    self.cope_consent_id = self.code_dao.insert(cope_consent_code()).codeId
    self.dna_program_consent_update_code_id = self.code_dao.insert(self.dna_program_consent_update_code).codeId
    self.consent_update_yes = self.data_generator.create_database_code(value=COHORT_1_REVIEW_CONSENT_YES_CODE)
    self.consent_update_no = self.data_generator.create_database_code(value=COHORT_1_REVIEW_CONSENT_NO_CODE)
    self.login_phone_number_code_id = self.code_dao.insert(login_phone_number_code()).codeId
    # Questions for the primary-consent contact fields.
    self.FN_QUESTION = QuestionnaireQuestion(linkId="fn", codeId=self.first_name_code_id, repeats=False)
    self.LN_QUESTION = QuestionnaireQuestion(linkId="ln", codeId=self.last_name_code_id, repeats=False)
    self.EMAIL_QUESTION = QuestionnaireQuestion(linkId="email", codeId=self.email_code_id, repeats=False)
    self.LOGIN_PHONE_NUMBER_QUESTION = QuestionnaireQuestion(
        linkId="lpn", codeId=self.login_phone_number_code_id, repeats=False)
    # Fake contact values answered in the tests.
    self.first_name = self.fake.first_name()
    self.last_name = self.fake.last_name()
    self.email = self.fake.email()
    self.login_phone_number = self.fake.phone_number()
    self.FN_ANSWER = QuestionnaireResponseAnswer(
        questionnaireResponseAnswerId=3, questionnaireResponseId=1,
        questionId=3, valueString=self.first_name)
    self.LN_ANSWER = QuestionnaireResponseAnswer(
        questionnaireResponseAnswerId=4, questionnaireResponseId=1,
        questionId=4, valueString=self.last_name)
    self.EMAIL_ANSWER = QuestionnaireResponseAnswer(
        questionnaireResponseAnswerId=5, questionnaireResponseId=1,
        questionId=5, valueString=self.email)
    self.LOGIN_PHONE_NUMBER_ANSWER = QuestionnaireResponseAnswer(
        questionnaireResponseAnswerId=6, questionnaireResponseId=1,
        questionId=6, valueString=self.login_phone_number)
    # COPE vaccine minute-survey module codes (vaccine_1..4_survey_module_code).
    vaccine_modules = (
        COPE_VACCINE_MINUTE_1_MODULE_CODE,
        COPE_VACCINE_MINUTE_2_MODULE_CODE,
        COPE_VACCINE_MINUTE_3_MODULE_CODE,
        COPE_VACCINE_MINUTE_4_MODULE_CODE,
    )
    for minute, module_value in enumerate(vaccine_modules, start=1):
        setattr(self, 'vaccine_%d_survey_module_code' % minute,
                self.data_generator.create_database_code(value=module_value))
def setup_basics_profile_update_codes_list(self):
    """Create TheBasics profile-update question codes and remember their codeIds."""
    self.basics_profile_update_codes = [
        self.data_generator.create_database_code(value=code_value, codeType=3).codeId
        for code_value in BASICS_PROFILE_UPDATE_QUESTION_CODES
    ]
def check_response(self, expected_qr):
    """Fetch the stored response (including answers) and compare it to *expected_qr*."""
    actual = self.questionnaire_response_dao.get_with_children(expected_qr.questionnaireResponseId)
    self.assertResponseDictEquals(expected_qr.asdict(follow=ANSWERS), actual.asdict(follow=ANSWERS))
def _names_and_email_answers(self):
    """Answers satisfying the first-name / last-name / email requirement."""
    return [
        self.FN_ANSWER,
        self.LN_ANSWER,
        self.EMAIL_ANSWER,
    ]
def _names_and_login_phone_number_answers(self):
    """Answers satisfying the first-name / last-name / login-phone requirement."""
    return [
        self.FN_ANSWER,
        self.LN_ANSWER,
        self.LOGIN_PHONE_NUMBER_ANSWER,
    ]
def test_get_before_insert(self):
    """Every DAO getter returns None for an id that was never inserted."""
    getters = (
        self.questionnaire_response_dao.get,
        self.questionnaire_response_dao.get_with_children,
        self.questionnaire_response_answer_dao.get,
    )
    for getter in getters:
        self.assertIsNone(getter(1))
def test_insert_questionnaire_not_found(self):
    """A response referencing a questionnaire that does not exist raises BadRequest."""
    self.participant_dao.insert(Participant(participantId=1, biobankId=2))
    response = self.data_generator._questionnaire_response(
        questionnaireResponseId=1, questionnaireId=1, questionnaireVersion=1,
        questionnaireSemanticVersion='V1', participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE)
    with self.assertRaises(BadRequest):
        self._insert_questionnaire_response(response)
def test_insert_participant_not_found(self):
    """A response for a participant that was never created raises BadRequest."""
    self.insert_codes()
    questionnaire = Questionnaire(resource=QUESTIONNAIRE_RESOURCE)
    questionnaire.concepts.append(QuestionnaireConcept(codeId=self.consent_code_id))
    self.questionnaire_dao.insert(questionnaire)
    response = self.data_generator._questionnaire_response(
        questionnaireResponseId=1, questionnaireId=1, questionnaireVersion=1,
        questionnaireSemanticVersion='V1', participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE)
    response.answers.extend(self._names_and_email_answers())
    # Answers are there but the participant is not.
    with self.assertRaises(BadRequest):
        self._insert_questionnaire_response(response)
def test_insert_participant_not_found2(self):
    """A response whose participantId matches no existing participant raises BadRequest."""
    self.insert_codes()
    self.participant_dao.insert(
        Participant(participantId=1, biobankId=2, withdrawalStatus=WithdrawalStatus.NOT_WITHDRAWN))
    self._setup_questionnaire()
    # Participant 2 was never created.
    response = self.data_generator._questionnaire_response(
        questionnaireResponseId=1, questionnaireId=1, questionnaireVersion=1,
        questionnaireSemanticVersion='V1', participantId=2,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE)
    response.answers.extend(self._names_and_email_answers())
    with self.assertRaises(BadRequest):
        self._insert_questionnaire_response(response)
def test_insert_not_name_answers(self):
    """A primary-consent response with no name/contact answers is rejected."""
    self.insert_codes()
    self.participant_dao.insert(Participant(participantId=1, biobankId=2))
    self._setup_questionnaire()
    response = self.data_generator._questionnaire_response(
        questionnaireResponseId=1, questionnaireId=1, questionnaireVersion=1,
        questionnaireSemanticVersion='V1', participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE)
    response.answers.append(QuestionnaireResponseAnswer(
        questionnaireResponseAnswerId=2, questionnaireResponseId=1,
        questionId=2, valueSystem="c", valueCodeId=4))
    # Both first and last name are required.
    with self.assertRaises(BadRequest):
        self._insert_questionnaire_response(response)
def test_insert_first_name_only(self):
    """A response supplying only the first name is rejected with BadRequest."""
    self.insert_codes()
    self.participant_dao.insert(Participant(participantId=1, biobankId=2))
    self._setup_questionnaire()
    response = self.data_generator._questionnaire_response(
        questionnaireResponseId=1, questionnaireId=1, questionnaireVersion=1,
        questionnaireSemanticVersion='V1', participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE)
    response.answers.append(self.FN_ANSWER)
    with self.assertRaises(BadRequest):
        self._insert_questionnaire_response(response)
def test_insert_last_name_only(self):
    """A response supplying only the last name is rejected with BadRequest."""
    self.insert_codes()
    self.participant_dao.insert(Participant(participantId=1, biobankId=2))
    self._setup_questionnaire()
    response = self.data_generator._questionnaire_response(
        questionnaireResponseId=1, questionnaireId=1, questionnaireVersion=1,
        questionnaireSemanticVersion='V1', participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE)
    # Both first and last name are required.
    response.answers.append(self.LN_ANSWER)
    with self.assertRaises(BadRequest):
        self._insert_questionnaire_response(response)
def test_insert_names_only(self):
    """Names without an email (or phone) are rejected with BadRequest."""
    self.insert_codes()
    self.participant_dao.insert(Participant(participantId=1, biobankId=2))
    self._setup_questionnaire()
    response = self.data_generator._questionnaire_response(
        questionnaireResponseId=1, questionnaireId=1, questionnaireVersion=1,
        questionnaireSemanticVersion='V1', participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE)
    # Email is required.
    response.answers.append(self.FN_ANSWER)
    response.answers.append(self.LN_ANSWER)
    with self.assertRaises(BadRequest):
        self._insert_questionnaire_response(response)
def test_insert_email_only(self):
    """An email without first and last names is rejected with BadRequest."""
    self.insert_codes()
    self.participant_dao.insert(Participant(participantId=1, biobankId=2))
    self._setup_questionnaire()
    response = self.data_generator._questionnaire_response(
        questionnaireResponseId=1, questionnaireId=1, questionnaireVersion=1,
        questionnaireSemanticVersion='V1', participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE)
    # First and last name are required.
    response.answers.append(self.EMAIL_ANSWER)
    with self.assertRaises(BadRequest):
        self._insert_questionnaire_response(response)
def test_insert_login_phone_number_only(self):
    """A login phone number without first and last names is rejected with BadRequest."""
    self.insert_codes()
    self.participant_dao.insert(Participant(participantId=1, biobankId=2))
    self._setup_questionnaire()
    response = self.data_generator._questionnaire_response(
        questionnaireResponseId=1, questionnaireId=1, questionnaireVersion=1,
        questionnaireSemanticVersion='V1', participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE)
    # First and last name are required.
    response.answers.append(self.LOGIN_PHONE_NUMBER_ANSWER)
    with self.assertRaises(BadRequest):
        self._insert_questionnaire_response(response)
def test_insert_both_email_and_login_phone_number_without_names(self):
    """Email plus phone still fails without first and last names."""
    self.insert_codes()
    self.participant_dao.insert(Participant(participantId=1, biobankId=2))
    self._setup_questionnaire()
    response = self.data_generator._questionnaire_response(
        questionnaireResponseId=1, questionnaireId=1, questionnaireVersion=1,
        questionnaireSemanticVersion='V1', participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE)
    # First and last name are required.
    response.answers.append(self.EMAIL_ANSWER)
    response.answers.append(self.LOGIN_PHONE_NUMBER_ANSWER)
    with self.assertRaises(BadRequest):
        self._insert_questionnaire_response(response)
def test_insert_both_names_and_login_phone_number(self):
    """Names plus a login phone number (no email) form a valid primary consent."""
    self.insert_codes()
    p = Participant(participantId=1, biobankId=2)
    self.participant_dao.insert(p)
    self._setup_questionnaire()
    qr = self.data_generator._questionnaire_response(
        questionnaireResponseId=1,
        questionnaireId=1,
        questionnaireVersion=1,
        questionnaireSemanticVersion='V1',
        participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE
    )
    qr.answers.extend(self._names_and_login_phone_number_answers())
    time = datetime.datetime(2016, 1, 1)
    with FakeClock(time):
        qr.authored = time
        self._insert_questionnaire_response(qr)
    # The stored response should have the submitted id stamped into its
    # resource and created/authored set to the fake clock time.
    expected_qr = self.data_generator._questionnaire_response(
        questionnaireResponseId=1,
        questionnaireId=1,
        questionnaireVersion=1,
        participantId=1,
        questionnaireSemanticVersion='V1',
        resource=with_id(QUESTIONNAIRE_RESPONSE_RESOURCE, 1),
        created=time,
        authored=time,
    )
    expected_qr.answers.extend(self._names_and_login_phone_number_answers())
    # _insert_questionnaire_response added the consent-pdf extension before
    # inserting, so the expected resource needs it too.
    self._add_consent_extension_to_response(expected_qr)
    qr2 = self.questionnaire_response_dao.get(1)
    self.assertResponseDictEquals(expected_qr.asdict(), qr2.asdict())
    self.check_response(expected_qr)
def test_insert_both_names_and_email(self):
    """Names plus an email form a valid primary consent and round-trip intact."""
    self.insert_codes()
    p = Participant(participantId=1, biobankId=2)
    self.participant_dao.insert(p)
    self._setup_questionnaire()
    qr = self.data_generator._questionnaire_response(
        questionnaireResponseId=1,
        questionnaireId=1,
        questionnaireVersion=1,
        questionnaireSemanticVersion='V1',
        participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE
    )
    qr.answers.extend(self._names_and_email_answers())
    time = datetime.datetime(2016, 1, 1)
    with FakeClock(time):
        qr.authored = time
        self._insert_questionnaire_response(qr)
    # The stored response should have the submitted id stamped into its
    # resource and created/authored set to the fake clock time.
    expected_qr = self.data_generator._questionnaire_response(
        questionnaireResponseId=1,
        questionnaireId=1,
        questionnaireVersion=1,
        participantId=1,
        questionnaireSemanticVersion='V1',
        resource=with_id(QUESTIONNAIRE_RESPONSE_RESOURCE, 1),
        created=time,
        authored=time,
    )
    # _insert_questionnaire_response added the consent-pdf extension before
    # inserting, so the expected resource needs it too.
    self._add_consent_extension_to_response(expected_qr)
    expected_qr.answers.extend(self._names_and_email_answers())
    qr2 = self.questionnaire_response_dao.get(1)
    self.assertResponseDictEquals(expected_qr.asdict(), qr2.asdict())
    self.check_response(expected_qr)
def test_insert_both_names_and_email_and_login_phone_number(self):
    """Supplying names, email, and login phone together is accepted and stored."""
    self.insert_codes()
    p = Participant(participantId=1, biobankId=2)
    self.participant_dao.insert(p)
    self._setup_questionnaire()
    qr = self.data_generator._questionnaire_response(
        questionnaireResponseId=1,
        questionnaireId=1,
        questionnaireVersion=1,
        questionnaireSemanticVersion='V1',
        participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE
    )
    qr.answers.append(self.FN_ANSWER)
    qr.answers.append(self.LN_ANSWER)
    qr.answers.append(self.EMAIL_ANSWER)
    qr.answers.append(self.LOGIN_PHONE_NUMBER_ANSWER)
    time = datetime.datetime(2016, 1, 1)
    with FakeClock(time):
        qr.authored = time
        self._insert_questionnaire_response(qr)
    # The stored response should have the submitted id stamped into its
    # resource and created/authored set to the fake clock time.
    expected_qr = self.data_generator._questionnaire_response(
        questionnaireResponseId=1,
        questionnaireId=1,
        questionnaireVersion=1,
        participantId=1,
        questionnaireSemanticVersion='V1',
        resource=with_id(QUESTIONNAIRE_RESPONSE_RESOURCE, 1),
        created=time,
        authored=time,
    )
    expected_qr.answers.append(self.FN_ANSWER)
    expected_qr.answers.append(self.LN_ANSWER)
    expected_qr.answers.append(self.EMAIL_ANSWER)
    expected_qr.answers.append(self.LOGIN_PHONE_NUMBER_ANSWER)
    # _insert_questionnaire_response added the consent-pdf extension before
    # inserting, so the expected resource needs it too.
    self._add_consent_extension_to_response(expected_qr)
    qr2 = self.questionnaire_response_dao.get(1)
    self.assertResponseDictEquals(expected_qr.asdict(), qr2.asdict())
    self.check_response(expected_qr)
def test_insert_duplicate(self):
    """Re-inserting a response with an existing questionnaireResponseId raises IntegrityError."""
    self.insert_codes()
    p = Participant(participantId=1, biobankId=2)
    self.participant_dao.insert(p)
    self._setup_questionnaire()
    qr = self.data_generator._questionnaire_response(
        questionnaireResponseId=1,
        questionnaireId=1,
        questionnaireVersion=1,
        questionnaireSemanticVersion='V1',
        participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE
    )
    qr.answers.extend(self._names_and_email_answers())
    self._insert_questionnaire_response(qr)
    # Second response reuses questionnaireResponseId=1 with different content.
    qr2 = self.data_generator._questionnaire_response(
        questionnaireResponseId=1,
        questionnaireId=1,
        questionnaireVersion=1,
        questionnaireSemanticVersion='V1',
        participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE_2
    )
    qr2.answers.append(
        QuestionnaireResponseAnswer(
            questionnaireResponseAnswerId=2,
            questionnaireResponseId=1,
            questionId=2,
            valueSystem="c",
            valueCodeId=4,
        )
    )
    with self.assertRaises(IntegrityError):
        self._insert_questionnaire_response(qr2)
def test_insert_skip_codes(self):
    """PMI_Skip answers are stored and surface as GenderIdentity.PMI_Skip on the summary."""
    self.insert_codes()
    p = Participant(participantId=1, biobankId=2)
    with FakeClock(TIME):
        self.participant_dao.insert(p)
    self._setup_questionnaire()
    qr = self.data_generator._questionnaire_response(
        questionnaireResponseId=1,
        questionnaireId=1,
        questionnaireVersion=1,
        questionnaireSemanticVersion='V1',
        participantId=1,
        resource=QUESTIONNAIRE_RESPONSE_RESOURCE
    )
    # Gender-identity question (questionId=1) answered with the PMI_Skip code.
    answer_1 = QuestionnaireResponseAnswer(
        questionnaireResponseAnswerId=1,
        questionnaireResponseId=1,
        questionId=1,
        valueSystem="a",
        valueCodeId=self.skip_code.codeId,
    )
    answer_2 = QuestionnaireResponseAnswer(
        questionnaireResponseAnswerId=2, questionnaireResponseId=1, questionId=2, valueSystem="c", valueCodeId=4
    )
    qr.answers.extend([answer_1, answer_2])
    qr.answers.extend(self._names_and_email_answers())
    with FakeClock(TIME_2):
        self._insert_questionnaire_response(qr)
    expected_qr = self.data_generator._questionnaire_response(
        questionnaireResponseId=1,
        questionnaireId=1,
        questionnaireVersion=1,
        participantId=1,
        questionnaireSemanticVersion='V1',
        resource=with_id(QUESTIONNAIRE_RESPONSE_RESOURCE, 1),
        created=TIME_2,
        authored=TIME_2,
    )
    # _insert_questionnaire_response added the consent-pdf extension before
    # inserting, so the expected resource needs it too.
    self._add_consent_extension_to_response(expected_qr)
    qr2 = self.questionnaire_response_dao.get(1)
    self.assertResponseDictEquals(expected_qr.asdict(), qr2.asdict())
    expected_qr.answers.extend([answer_1, answer_2])
    expected_qr.answers.extend(self._names_and_email_answers())
    self.check_response(expected_qr)
    # Summary should reflect the TheBasics submission, the primary consent,
    # and the skipped gender identity.
    expected_ps = self.data_generator._participant_summary_with_defaults(
        genderIdentity=GenderIdentity.PMI_Skip,
        genderIdentityId=8,
        participantId=1,
        biobankId=2,
        signUpTime=TIME,
        numCompletedBaselinePPIModules=1,
        numCompletedPPIModules=1,
        questionnaireOnTheBasics=QuestionnaireStatus.SUBMITTED,
        questionnaireOnTheBasicsTime=TIME_2,
        questionnaireOnTheBasicsAuthored=TIME_2,
        consentForStudyEnrollment=QuestionnaireStatus.SUBMITTED,
        consentForStudyEnrollmentTime=TIME_2,
        consentForStudyEnrollmentAuthored=TIME_2,
        consentForStudyEnrollmentFirstYesAuthored=TIME_2,
        firstName=self.first_name,
        lastName=self.last_name,
        email=self.email,
        lastModified=TIME_2,
        patientStatus=[],
        semanticVersionForPrimaryConsent='V1',
        consentCohort=ParticipantCohort.COHORT_1,
        retentionEligibleStatus=None,
        wasEhrDataAvailable=False
    )
    self.assertEqual(expected_ps.asdict(), self.participant_summary_dao.get(1).asdict())
def test_loading_basics_profile_update_codes(self):
    """QuestionnaireResponseDao initializes with the list of TheBasics profile-update codes."""
    # Add the profile-update codes to the unittest db Code table, then drop
    # the caches so the next DAO instantiation reloads them.
    self.setup_basics_profile_update_codes_list()
    for cache_index in (singletons.CODE_CACHE_INDEX,
                        singletons.BASICS_PROFILE_UPDATE_CODES_CACHE_INDEX):
        singletons.invalidate(cache_index)
    dao = QuestionnaireResponseDao()
    # assertCountEqual compares the iterables for item equivalence, not just length.
    self.assertCountEqual(self.basics_profile_update_codes, dao.thebasics_profile_update_codes)
@mock.patch.object(QuestionnaireResponseDao, '_load_thebasics_profile_update_codes')
def test_caching_basics_profile_update_codes(self, load_mock):
    """Later DAO instantiations read the profile-update code list from the app cache."""
    expected_codes = [1, 2, 3]
    load_mock.return_value = expected_codes
    first_dao = QuestionnaireResponseDao()
    second_dao = QuestionnaireResponseDao()
    # Only the first instantiation misses the cache and runs the loader; both
    # DAOs still expose the same code list.
    self.assertEqual(load_mock.call_count, 1)
    self.assertCountEqual(first_dao.thebasics_profile_update_codes, expected_codes)
    self.assertCountEqual(second_dao.thebasics_profile_update_codes, expected_codes)
def assertResponseDictEquals(self, expected_response: dict, actual_response: dict):
expected_resource_json = json.loads(expected_response['resource'])
del expected_response['resource']
actual_resource_json = json.loads(actual_response['resource'])
del actual_response['resource']
self.assertEqual(expected_response, actual_response, 'QuestionnaireResponses dicts do not match')
self.assertEqual(expected_resource_json, actual_resource_json, 'Response resource strings do not match')
@staticmethod
def _add_consent_extension_to_response(response: QuestionnaireResponse):
# Add necessary consent file extension to resource json
resource_json = json.loads(response.resource)
resource_json['extension'] = [{
"url": "http://terminology.pmi-ops.org/StructureDefinition/consent-form-signed-pdf",
"valueString": "Participant/nonexistent/test_consent_file_name.pdf"
}]
response.resource = json.dumps(resource_json)
def _insert_questionnaire_response(self, response):
self._add_consent_extension_to_response(response)
# Send the consent, making it look like any necessary cloud files exist (for primary consent)
with mock.patch('rdr_service.dao.questionnaire_response_dao._raise_if_gcloud_file_missing', return_value=True):
self.questionnaire_response_dao.insert(response)
def _setup_participant(self):
self._setup_questionnaire()
qr = self.data_generator._questionnaire_response(
questionnaireResponseId=1,
questionnaireId=1,
questionnaireVersion=1,
questionnaireSemanticVersion='V1',
participantId=1,
resource=QUESTIONNAIRE_RESPONSE_RESOURCE
)
answer_1 = QuestionnaireResponseAnswer(
questionnaireResponseAnswerId=1,
questionnaireResponseId=1,
questionId=1,
valueSystem="a",
valueCodeId=3,
valueDecimal=123,
valueString=self.fake.first_name(),
valueDate=datetime.date.today(),
)
answer_2 = QuestionnaireResponseAnswer(
questionnaireResponseAnswerId=2, questionnaireResponseId=1, questionId=2, valueSystem="c", valueCodeId=4
)
qr.answers.append(answer_1)
qr.answers.append(answer_2)
names_and_email_answers = self._names_and_email_answers()
qr.answers.extend(names_and_email_answers)
with FakeClock(TIME_2):
self._insert_questionnaire_response(qr)
def _create_questionnaire(self, created_date=datetime.datetime.now(), question_code_id=None,
identifier='1', module_code: Code = None) -> Questionnaire:
questionnaire = Questionnaire(resource=QUESTIONNAIRE_RESOURCE, externalId=identifier)
if module_code:
concept = QuestionnaireConcept(codeId=module_code.codeId)
questionnaire.concepts = [concept]
if question_code_id:
question = QuestionnaireQuestion(codeId=question_code_id, repeats=False)
questionnaire.questions.append(question)
with FakeClock(created_date):
return self.questionnaire_dao.insert(questionnaire)
def _create_cope_questionnaire(self, identifier='Cope'):
self._create_questionnaire(datetime.datetime.now(), self.cope_consent_id, identifier=identifier)
def _submit_questionnaire_response(self, response_consent_code=None, questionnaire_version=1,
consent_question_id=7, authored_datetime=datetime.datetime(2020, 5, 5)):
qr = self.data_generator._questionnaire_response(
questionnaireId=2,
questionnaireVersion=questionnaire_version,
questionnaireSemanticVersion=f'V{questionnaire_version}',
participantId=1,
resource=QUESTIONNAIRE_RESPONSE_RESOURCE,
authored=authored_datetime
)
if response_consent_code:
answer = QuestionnaireResponseAnswer(
questionnaireResponseId=2,
questionId=consent_question_id,
valueSystem="a",
valueCodeId=response_consent_code.codeId,
)
qr.answers.extend([answer])
self._insert_questionnaire_response(qr)
def test_cope_updates_num_completed(self):
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
with FakeClock(TIME):
self.participant_dao.insert(p)
self._setup_participant()
self._create_cope_questionnaire()
self._submit_questionnaire_response(self.cope_consent_yes)
self.assertEqual(2, self.participant_summary_dao.get(1).numCompletedPPIModules)
self.assertEqual(QuestionnaireStatus.SUBMITTED, self.participant_summary_dao.get(1).questionnaireOnCopeMay)
def test_cope_resubmit(self):
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(p)
self._setup_participant()
self._create_cope_questionnaire(identifier='Form_13')
self._submit_questionnaire_response(self.cope_consent_yes)
self._submit_questionnaire_response(self.cope_consent_no)
self.assertEqual(QuestionnaireStatus.SUBMITTED_NO_CONSENT,
self.participant_summary_dao.get(1).questionnaireOnCopeMay)
def test_cope_june_survey_status(self):
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(p)
self._setup_participant()
self._create_cope_questionnaire(identifier='June')
self._submit_questionnaire_response(self.cope_consent_yes)
participant_summary = self.participant_summary_dao.get(1)
self.assertEqual(QuestionnaireStatus.SUBMITTED, participant_summary.questionnaireOnCopeJune)
def test_july_cope_survey(self):
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(p)
self._setup_participant()
self._create_cope_questionnaire(identifier='Form_1')
self._submit_questionnaire_response(self.cope_consent_yes)
participant_summary = self.participant_summary_dao.get(1)
self.assertEqual(QuestionnaireStatus.SUBMITTED, participant_summary.questionnaireOnCopeJuly)
def test_cope_june_survey_consent_deferred(self):
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(p)
self._setup_participant()
self._create_cope_questionnaire(identifier='June')
self._submit_questionnaire_response(self.cope_consent_deferred)
participant_summary = self.participant_summary_dao.get(1)
self.assertEqual(QuestionnaireStatus.SUBMITTED_NO_CONSENT, participant_summary.questionnaireOnCopeJune)
@mock.patch('rdr_service.dao.questionnaire_response_dao.logging')
def test_unrecognized_cope_form(self, mock_logging):
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(p)
self._setup_participant()
self._create_cope_questionnaire(identifier='new_id')
self._submit_questionnaire_response(self.cope_consent_yes, authored_datetime=datetime.datetime(2020, 8, 9))
participant_summary = self.participant_summary_dao.get(1)
self.assertEqual(QuestionnaireStatus.SUBMITTED, participant_summary.questionnaireOnCopeJuly)
self.assertEqual(datetime.datetime(2020, 8, 9), participant_summary.questionnaireOnCopeJulyAuthored)
mock_logging.error.assert_called_with(
'Unrecognized identifier for COPE survey response '
'(questionnaire_id: "2", version: "1", identifier: "new_id"'
)
@mock.patch('rdr_service.dao.questionnaire_response_dao.logging')
def test_unrecognized_november_cope_form(self, mock_logging):
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(p)
self._setup_participant()
self._create_cope_questionnaire(identifier='oct_unknown')
self._submit_questionnaire_response(self.cope_consent_yes, authored_datetime=datetime.datetime(2020, 10, 14))
participant_summary = self.participant_summary_dao.get(1)
self.assertEqual(QuestionnaireStatus.SUBMITTED, participant_summary.questionnaireOnCopeNov)
self.assertEqual(datetime.datetime(2020, 10, 14), participant_summary.questionnaireOnCopeNovAuthored)
mock_logging.error.assert_called_with(
'Unrecognized identifier for COPE survey response '
'(questionnaire_id: "2", version: "1", identifier: "oct_unknown"'
)
def test_november_cope_survey(self):
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(p)
self._setup_participant()
num_completed_ppi_after_setup = self.participant_summary_dao.get(1).numCompletedPPIModules
self._create_cope_questionnaire(identifier='NovCope')
self._submit_questionnaire_response(self.cope_consent_yes)
participant_summary = self.participant_summary_dao.get(1)
self.assertEqual(QuestionnaireStatus.SUBMITTED, participant_summary.questionnaireOnCopeNov)
self.assertEqual(num_completed_ppi_after_setup + 1, participant_summary.numCompletedPPIModules)
def test_december_cope_survey(self):
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(p)
self._setup_participant()
num_completed_ppi_after_setup = self.participant_summary_dao.get(1).numCompletedPPIModules
self._create_cope_questionnaire(identifier='DecCope')
self._submit_questionnaire_response(self.cope_consent_yes, authored_datetime=datetime.datetime(2020, 12, 8))
participant_summary = self.participant_summary_dao.get(1)
self.assertEqual(QuestionnaireStatus.SUBMITTED, participant_summary.questionnaireOnCopeDec)
self.assertEqual(num_completed_ppi_after_setup + 1, participant_summary.numCompletedPPIModules)
def test_february_cope_survey(self):
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(p)
self._setup_participant()
num_completed_ppi_after_setup = self.participant_summary_dao.get(1).numCompletedPPIModules
self._create_cope_questionnaire(identifier='FEB_FORM')
self._submit_questionnaire_response(self.cope_consent_yes, authored_datetime=datetime.datetime(2021, 2, 17))
participant_summary = self.participant_summary_dao.get(1)
self.assertEqual(QuestionnaireStatus.SUBMITTED, participant_summary.questionnaireOnCopeFeb)
self.assertEqual(num_completed_ppi_after_setup + 1, participant_summary.numCompletedPPIModules)
def test_covid_19_serology_results(self):
""" Test the covid 19 serology results survey"""
self.insert_codes()
participant_id = self.create_participant()
self.send_consent(participant_id)
questionnaire_id = self.create_questionnaire("questionnaire_covid_19_serology_results.json")
url = f"Participant/{participant_id}/QuestionnaireResponse"
code_answers = list()
code_answers.append(('covid_19_serology_results_decision', Concept(PPI_SYSTEM, 'Decision_No')))
resource = self.make_questionnaire_response_json(participant_id, questionnaire_id,
code_answers=code_answers)
response = self.send_post(url, resource)
self.assertEqual(response["group"]["question"][0]["answer"][0]["valueCoding"]['code'], 'Decision_No')
record: ResourceData = self.session.query(ResourceData).\
filter(ResourceData.resourcePKAltID==participant_id).one()
decision_found = False
for mod in record.resource['modules']:
if mod['module'] == 'covid_19_serology_results':
decision_found = True
self.assertEqual(mod['consent_value'], 'Decision_No')
self.assertEqual(mod['status'], 'SUBMITTED_NO_CONSENT')
break
self.assertEqual(decision_found, True)
def setup_cope_minute_base_survey(self, module_num):
"""
Setup for base logic/data for cope minute surveys.
Module num sets which survey to create questionnaire for
:param module_num: int
:return: dict
"""
self.insert_codes()
module_map = {
1: self.vaccine_1_survey_module_code,
2: self.vaccine_2_survey_module_code,
3: self.vaccine_3_survey_module_code,
4: self.vaccine_4_survey_module_code
}
participant = self.data_generator.create_database_participant(participantId=1, biobankId=2)
self._setup_participant()
num_completed_ppi_after_setup = self.participant_summary_dao.get(1).numCompletedPPIModules
questionnaire = self._create_questionnaire(module_code=module_map[module_num])
authored_date = datetime.datetime(2021, 3, 4)
self.submit_questionnaire_response(
participant_id=to_client_participant_id(participant.participantId),
questionnaire_id=questionnaire.questionnaireId,
authored_datetime=authored_date
)
summary: ParticipantSummary = self.session.query(ParticipantSummary).filter(
ParticipantSummary.participantId == participant.participantId
).one()
return {
'participant': participant,
'num_completed_ppi_after_setup': num_completed_ppi_after_setup,
'authored_date': authored_date,
'summary': summary
}
def test_cope_first_minute_survey(self):
base_setup = self.setup_cope_minute_base_survey(module_num=1)
summary = base_setup['summary']
participant = base_setup['participant']
authored_date = base_setup['authored_date']
self.assertEqual(QuestionnaireStatus.SUBMITTED, summary.questionnaireOnCopeVaccineMinute1)
self.assertEqual(authored_date, summary.questionnaireOnCopeVaccineMinute1Authored)
self.assertEqual(base_setup['num_completed_ppi_after_setup'] + 1, summary.numCompletedPPIModules)
participant_res_data = self.make_participant_resource(participant.participantId)
[vaccine_module_data] = self.get_generated_items(
participant_res_data['modules'],
item_key='module',
item_value=COPE_VACCINE_MINUTE_1_MODULE_CODE
)
self.assertIsNotNone(vaccine_module_data)
self.assertEqual(str(QuestionnaireStatus.SUBMITTED), vaccine_module_data['status'])
self.assertEqual(authored_date, vaccine_module_data['module_authored'])
def test_cope_second_minute_survey(self):
base_setup = self.setup_cope_minute_base_survey(module_num=2)
summary = base_setup['summary']
participant = base_setup['participant']
authored_date = base_setup['authored_date']
self.assertEqual(QuestionnaireStatus.SUBMITTED, summary.questionnaireOnCopeVaccineMinute2)
self.assertEqual(authored_date, summary.questionnaireOnCopeVaccineMinute2Authored)
self.assertEqual(base_setup['num_completed_ppi_after_setup'] + 1, summary.numCompletedPPIModules)
participant_res_data = self.make_participant_resource(participant.participantId)
[vaccine_module_data] = self.get_generated_items(
participant_res_data['modules'],
item_key='module',
item_value=COPE_VACCINE_MINUTE_2_MODULE_CODE
)
self.assertIsNotNone(vaccine_module_data)
self.assertEqual(str(QuestionnaireStatus.SUBMITTED), vaccine_module_data['status'])
self.assertEqual(authored_date, vaccine_module_data['module_authored'])
def test_cope_third_minute_survey(self):
base_setup = self.setup_cope_minute_base_survey(module_num=3)
summary = base_setup['summary']
participant = base_setup['participant']
authored_date = base_setup['authored_date']
self.assertEqual(QuestionnaireStatus.SUBMITTED, summary.questionnaireOnCopeVaccineMinute3)
self.assertEqual(authored_date, summary.questionnaireOnCopeVaccineMinute3Authored)
self.assertEqual(base_setup['num_completed_ppi_after_setup'] + 1, summary.numCompletedPPIModules)
participant_res_data = self.make_participant_resource(participant.participantId)
[vaccine_module_data] = self.get_generated_items(
participant_res_data['modules'],
item_key='module',
item_value=COPE_VACCINE_MINUTE_3_MODULE_CODE
)
self.assertIsNotNone(vaccine_module_data)
self.assertEqual(str(QuestionnaireStatus.SUBMITTED), vaccine_module_data['status'])
self.assertEqual(authored_date, vaccine_module_data['module_authored'])
def test_cope_fourth_minute_survey(self):
base_setup = self.setup_cope_minute_base_survey(module_num=4)
summary = base_setup['summary']
participant = base_setup['participant']
authored_date = base_setup['authored_date']
self.assertEqual(QuestionnaireStatus.SUBMITTED, summary.questionnaireOnCopeVaccineMinute4)
self.assertEqual(authored_date, summary.questionnaireOnCopeVaccineMinute4Authored)
self.assertEqual(base_setup['num_completed_ppi_after_setup'] + 1, summary.numCompletedPPIModules)
participant_res_data = self.make_participant_resource(participant.participantId)
[vaccine_module_data] = self.get_generated_items(
participant_res_data['modules'],
item_key='module',
item_value=COPE_VACCINE_MINUTE_4_MODULE_CODE
)
self.assertIsNotNone(vaccine_module_data)
self.assertEqual(str(QuestionnaireStatus.SUBMITTED), vaccine_module_data['status'])
self.assertEqual(authored_date, vaccine_module_data['module_authored'])
def test_ppi_questionnaire_count_field_not_found(self):
"""Make sure QuestionnaireResponseDao doesn't fail when an unknown field is part of the list"""
config.override_setting(config.PPI_QUESTIONNAIRE_FIELDS, ['questionnaireOnTheBasics', 'nonExistentField'])
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(p)
self._setup_participant()
num_completed_ppi_after_setup = self.participant_summary_dao.get(1).numCompletedPPIModules
self.assertEqual(1, num_completed_ppi_after_setup,
"Was expecting the Basics questionnaire as part of the setup to check that counting works")
def _create_dna_program_questionnaire(self, created_date=datetime.datetime(2020, 5, 5)):
self._create_questionnaire(created_date)
self.data_generator.create_database_questionnaire_concept(
questionnaireId=2,
questionnaireVersion=1,
codeId=self.dna_program_consent_update_code_id
)
def test_dna_program_consent_update_questionnaire(self):
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(p)
self._setup_participant()
self._create_dna_program_questionnaire()
self._submit_questionnaire_response()
participant_summary = self.participant_summary_dao.get(1)
self.assertEqual(QuestionnaireStatus.SUBMITTED, participant_summary.questionnaireOnDnaProgram)
def _create_consent_update_questionnaire(self, created_date=datetime.datetime(2020, 5, 5)):
self._create_questionnaire(created_date, question_code_id=self.consent_update_question_code.codeId)
def test_primary_consent_update_changes_study_authored(self):
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(p)
self._setup_participant()
self._create_consent_update_questionnaire()
consent_update_authored_date = datetime.datetime(2020, 7, 27)
self._submit_questionnaire_response(
response_consent_code=self.consent_update_yes,
authored_datetime=consent_update_authored_date
)
participant_summary = self.participant_summary_dao.get(1)
self.assertEqual(consent_update_authored_date, participant_summary.consentForStudyEnrollmentAuthored)
def test_consent_update_refusal_does_not_change_authored_time(self):
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(p)
self._setup_participant()
participant_summary = self.participant_summary_dao.get(1)
original_consent_authored_time = participant_summary.consentForStudyEnrollmentAuthored
self._create_consent_update_questionnaire()
consent_update_authored_date = datetime.datetime(2020, 7, 27)
with FakeClock(consent_update_authored_date):
self._submit_questionnaire_response(response_consent_code=self.consent_update_no)
new_participant_summary = self.participant_summary_dao.get(1)
self.assertEqual(original_consent_authored_time, new_participant_summary.consentForStudyEnrollmentAuthored)
self.assertNotEqual(consent_update_authored_date, new_participant_summary.consentForStudyEnrollmentAuthored)
def test_from_client_json_raises_BadRequest_for_excessively_long_value_string(self):
self.insert_codes()
q_id = self.create_questionnaire("questionnaire1.json")
p_id = self.create_participant()
self.send_consent(p_id)
# First check that the normal case actually writes out correctly
string = "a" * QuestionnaireResponseAnswer.VALUE_STRING_MAXLEN
string_answers = [["nameOfChild", string]]
resource = self.make_questionnaire_response_json(p_id, q_id, string_answers=string_answers)
qr = self.questionnaire_response_dao.from_client_json(resource, participant_id=int(p_id[1:]))
with self.questionnaire_response_answer_dao.session() as session:
self._insert_questionnaire_response(qr)
all_strings_query = session.query(QuestionnaireResponseAnswer.valueString).all()
all_strings = [obj.valueString for obj in all_strings_query]
self.assertTrue(string in all_strings)
# Now check that the incorrect case throws
string = "a" * (QuestionnaireResponseAnswer.VALUE_STRING_MAXLEN + 1)
string_answers = [["nameOfChild", string]]
resource = self.make_questionnaire_response_json(p_id, q_id, string_answers=string_answers)
with self.assertRaises(BadRequest):
qr = self.questionnaire_response_dao.from_client_json(resource, participant_id=int(p_id[1:]))
def test_get_after_withdrawal_fails(self):
self.insert_codes()
p = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(p)
self._setup_questionnaire()
qr = self.data_generator._questionnaire_response(
questionnaireResponseId=1,
questionnaireId=1,
questionnaireVersion=1,
questionnaireSemanticVersion='V1',
participantId=1,
resource=QUESTIONNAIRE_RESPONSE_RESOURCE
)
qr.answers.extend(self._names_and_email_answers())
self._insert_questionnaire_response(qr)
p.withdrawalStatus = WithdrawalStatus.NO_USE
self.participant_dao.update(p)
with self.assertRaises(Forbidden):
self.questionnaire_response_dao.get(qr.questionnaireResponseId)
    def test_insert_with_answers(self):
        """Insert a questionnaire response with answers, then verify the stored response,
        its answers, and the resulting participant summary all match expectations.
        """
        self.insert_codes()
        p = Participant(participantId=1, biobankId=2)
        with FakeClock(TIME):
            self.participant_dao.insert(p)
        self._setup_questionnaire()
        qr = self.data_generator._questionnaire_response(
            questionnaireResponseId=1,
            questionnaireId=1,
            questionnaireVersion=1,
            questionnaireSemanticVersion='V1',
            participantId=1,
            resource=QUESTIONNAIRE_RESPONSE_RESOURCE
        )
        # One answer exercising every value column, plus a minimal code-only answer.
        answer_1 = QuestionnaireResponseAnswer(
            questionnaireResponseAnswerId=1,
            questionnaireResponseId=1,
            questionId=1,
            valueSystem="a",
            valueCodeId=3,
            valueDecimal=123,
            valueString=self.fake.first_name(),
            valueDate=datetime.date.today(),
        )
        answer_2 = QuestionnaireResponseAnswer(
            questionnaireResponseAnswerId=2, questionnaireResponseId=1, questionId=2, valueSystem="c", valueCodeId=4
        )
        qr.answers.append(answer_1)
        qr.answers.append(answer_2)
        names_and_email_answers = self._names_and_email_answers()
        qr.answers.extend(names_and_email_answers)
        with FakeClock(TIME_2):
            self._insert_questionnaire_response(qr)
        # Build the response we expect to read back; created/authored come from the FakeClock.
        expected_qr = self.data_generator._questionnaire_response(
            questionnaireResponseId=1,
            questionnaireId=1,
            questionnaireVersion=1,
            participantId=1,
            questionnaireSemanticVersion='V1',
            resource=with_id(QUESTIONNAIRE_RESPONSE_RESOURCE, 1),
            created=TIME_2,
            authored=TIME_2,
        )
        # The insert path adds the consent extension, so the expectation must carry it too.
        self._add_consent_extension_to_response(expected_qr)
        qr2 = self.questionnaire_response_dao.get(1)
        self.assertResponseDictEquals(expected_qr.asdict(), qr2.asdict())
        expected_qr.answers.append(answer_1)
        expected_qr.answers.append(answer_2)
        expected_qr.answers.extend(names_and_email_answers)
        self.check_response(expected_qr)
        # The participant summary should reflect the submitted TheBasics module and consent.
        expected_ps = self.data_generator._participant_summary_with_defaults(
            participantId=1,
            biobankId=2,
            genderIdentityId=3,
            signUpTime=TIME,
            numCompletedBaselinePPIModules=1,
            numCompletedPPIModules=1,
            questionnaireOnTheBasics=QuestionnaireStatus.SUBMITTED,
            questionnaireOnTheBasicsTime=TIME_2,
            questionnaireOnTheBasicsAuthored=TIME_2,
            consentForStudyEnrollment=QuestionnaireStatus.SUBMITTED,
            consentForStudyEnrollmentTime=TIME_2,
            consentForStudyEnrollmentAuthored=TIME_2,
            consentForStudyEnrollmentFirstYesAuthored=TIME_2,
            lastModified=TIME_2,
            firstName=self.first_name,
            lastName=self.last_name,
            email=self.email,
            patientStatus=[],
            semanticVersionForPrimaryConsent='V1',
            consentCohort=ParticipantCohort.COHORT_1,
            retentionEligibleStatus=None,
            wasEhrDataAvailable=False
        )
        self.assertEqual(expected_ps.asdict(), self.participant_summary_dao.get(1).asdict())
    def test_insert_qr_three_times(self):
        """Adds three questionnaire responses for the same participant.
        The latter two responses are for the same questionnaire, answering a question that has the
        same concept code and system as a question found on the first (different) questionnaire.
        Verifies that new answers set endTime on answers for questions with the same concept for the
        same participant, whether on the same questionnaire or a different questionnaire,
        without affecting other answers.
        """
        self.insert_codes()
        p = Participant(participantId=1, biobankId=2)
        with FakeClock(TIME):
            self.participant_dao.insert(p)
        self._setup_questionnaire()
        q2 = Questionnaire(resource=QUESTIONNAIRE_RESOURCE_2)
        # The question on the second questionnaire has the same concept as the first question on the
        # first questionnaire; answers to it will thus set endTime for answers to the first question.
        q2.questions.append(self.CODE_1_QUESTION_2)
        q2.questions.append(self.CODE_1_QUESTION_1)
        self.questionnaire_dao.insert(q2)
        # --- First response (questionnaire 1), inserted at TIME_2 ---
        qr = self.data_generator._questionnaire_response(
            questionnaireResponseId=1,
            questionnaireId=1,
            questionnaireVersion=1,
            questionnaireSemanticVersion='V1',
            participantId=1,
            resource=QUESTIONNAIRE_RESPONSE_RESOURCE
        )
        answer_1 = QuestionnaireResponseAnswer(
            questionnaireResponseAnswerId=1,
            questionnaireResponseId=1,
            questionId=1,
            valueSystem="a",
            valueCodeId=3,
            valueDecimal=123,
            valueString=self.fake.first_name(),
            valueDate=datetime.date.today(),
        )
        answer_2 = QuestionnaireResponseAnswer(
            questionnaireResponseAnswerId=2, questionnaireResponseId=1, questionId=2, valueSystem="c", valueCodeId=4
        )
        qr.answers.append(answer_1)
        qr.answers.append(answer_2)
        qr.answers.extend(self._names_and_email_answers())
        with FakeClock(TIME_2):
            self._insert_questionnaire_response(qr)
        expected_ps = self.data_generator._participant_summary_with_defaults(
            participantId=1,
            biobankId=2,
            genderIdentityId=3,
            signUpTime=TIME,
            numCompletedBaselinePPIModules=1,
            numCompletedPPIModules=1,
            questionnaireOnTheBasics=QuestionnaireStatus.SUBMITTED,
            questionnaireOnTheBasicsTime=TIME_2,
            questionnaireOnTheBasicsAuthored=TIME_2,
            consentForStudyEnrollment=QuestionnaireStatus.SUBMITTED,
            consentForStudyEnrollmentTime=TIME_2,
            consentForStudyEnrollmentAuthored=TIME_2,
            consentForStudyEnrollmentFirstYesAuthored=TIME_2,
            lastModified=TIME_2,
            firstName=self.first_name,
            lastName=self.last_name,
            email=self.email,
            patientStatus=[],
            semanticVersionForPrimaryConsent='V1',
            consentCohort=ParticipantCohort.COHORT_1,
            retentionEligibleStatus=None,
            wasEhrDataAvailable=False
        )
        self.assertEqual(expected_ps.asdict(), self.participant_summary_dao.get(1).asdict())
        # --- Second response (questionnaire 2), inserted at TIME_3; answers the shared-concept
        # question, so answer_1 on the first response should get an endTime. ---
        qr2 = self.data_generator._questionnaire_response(
            questionnaireResponseId=2,
            questionnaireId=2,
            questionnaireVersion=1,
            participantId=1,
            questionnaireSemanticVersion='V1',
            resource=QUESTIONNAIRE_RESPONSE_RESOURCE_2,
        )
        answer_3 = QuestionnaireResponseAnswer(
            questionnaireResponseAnswerId=6,
            questionnaireResponseId=2,
            questionId=7,
            valueSystem="x",
            valueCodeId=5,
            valueDecimal=123,
            valueString=self.fake.last_name(),
            valueDate=datetime.date.today(),
        )
        qr2.answers.append(answer_3)
        with FakeClock(TIME_3):
            qr2.authored = TIME_3
            self._insert_questionnaire_response(qr2)
        expected_qr = self.data_generator._questionnaire_response(
            questionnaireResponseId=1,
            questionnaireId=1,
            questionnaireVersion=1,
            participantId=1,
            questionnaireSemanticVersion='V1',
            resource=with_id(QUESTIONNAIRE_RESPONSE_RESOURCE, 1),
            created=TIME_2,
            authored=TIME_2,
        )
        # Answer one on the original response should be marked as ended, since a question with
        # the same concept was answered. Answer two should be left alone.
        answer_1.endTime = TIME_3
        expected_qr.answers.append(answer_1)
        expected_qr.answers.append(answer_2)
        expected_qr.answers.extend(self._names_and_email_answers())
        self._add_consent_extension_to_response(expected_qr)
        self.check_response(expected_qr)
        # The new questionnaire response should be there, too.
        expected_qr2 = self.data_generator._questionnaire_response(
            questionnaireResponseId=2,
            questionnaireId=2,
            questionnaireVersion=1,
            participantId=1,
            questionnaireSemanticVersion='V1',
            resource=with_id(QUESTIONNAIRE_RESPONSE_RESOURCE_2, 2),
            created=TIME_3,
            authored=TIME_3,
        )
        expected_qr2.answers.append(answer_3)
        self._add_consent_extension_to_response(expected_qr2)
        self.check_response(expected_qr2)
        expected_ps2 = self.data_generator._participant_summary_with_defaults(
            participantId=1,
            biobankId=2,
            genderIdentityId=5,
            signUpTime=TIME,
            numCompletedBaselinePPIModules=1,
            numCompletedPPIModules=1,
            questionnaireOnTheBasics=QuestionnaireStatus.SUBMITTED,
            questionnaireOnTheBasicsTime=TIME_2,
            questionnaireOnTheBasicsAuthored=TIME_2,
            lastModified=TIME_3,
            consentForStudyEnrollment=QuestionnaireStatus.SUBMITTED,
            consentForStudyEnrollmentTime=TIME_2,
            consentForStudyEnrollmentAuthored=TIME_2,
            consentForStudyEnrollmentFirstYesAuthored=TIME_2,
            firstName=self.first_name,
            lastName=self.last_name,
            email=self.email,
            patientStatus=[],
            semanticVersionForPrimaryConsent='V1',
            consentCohort=ParticipantCohort.COHORT_1,
            retentionEligibleStatus=None,
            wasEhrDataAvailable=False
        )
        # The participant summary should be updated with the new gender identity, but nothing else
        # changes.
        self.assertEqual(expected_ps2.asdict(), self.participant_summary_dao.get(1).asdict())
        # --- Third response (questionnaire 2 again), inserted at TIME_4; supersedes answer_3. ---
        qr3 = self.data_generator._questionnaire_response(
            questionnaireResponseId=3,
            questionnaireId=2,
            questionnaireVersion=1,
            participantId=1,
            questionnaireSemanticVersion='V1',
            resource=QUESTIONNAIRE_RESPONSE_RESOURCE_3,
        )
        answer_4 = QuestionnaireResponseAnswer(
            questionnaireResponseAnswerId=7,
            questionnaireResponseId=3,
            questionId=7,
            valueSystem="z",
            valueCodeId=6,
            valueDecimal=456,
            valueString=self.fake.last_name(),
            valueDate=datetime.date.today(),
        )
        qr3.answers.append(answer_4)
        with FakeClock(TIME_4):
            qr3.authored = TIME_4
            self._insert_questionnaire_response(qr3)
        # The first questionnaire response hasn't changed.
        self.check_response(expected_qr)
        # The second questionnaire response's answer should have had an endTime set.
        answer_3.endTime = TIME_4
        self.check_response(expected_qr2)
        # The third questionnaire response should be there.
        expected_qr3 = self.data_generator._questionnaire_response(
            questionnaireResponseId=3,
            questionnaireId=2,
            questionnaireVersion=1,
            participantId=1,
            questionnaireSemanticVersion='V1',
            resource=with_id(QUESTIONNAIRE_RESPONSE_RESOURCE_3, 3),
            created=TIME_4,
            authored=TIME_4
        )
        expected_qr3.answers.append(answer_4)
        self._add_consent_extension_to_response(expected_qr3)
        self.check_response(expected_qr3)
        expected_ps3 = self.data_generator._participant_summary_with_defaults(
            participantId=1,
            biobankId=2,
            genderIdentityId=6,
            signUpTime=TIME,
            numCompletedBaselinePPIModules=1,
            numCompletedPPIModules=1,
            questionnaireOnTheBasics=QuestionnaireStatus.SUBMITTED,
            questionnaireOnTheBasicsTime=TIME_2,
            questionnaireOnTheBasicsAuthored=TIME_2,
            consentForStudyEnrollment=QuestionnaireStatus.SUBMITTED,
            consentForStudyEnrollmentTime=TIME_2,
            consentForStudyEnrollmentAuthored=TIME_2,
            consentForStudyEnrollmentFirstYesAuthored=TIME_2,
            lastModified=TIME_4,
            firstName=self.first_name,
            lastName=self.last_name,
            email=self.email,
            patientStatus=[],
            semanticVersionForPrimaryConsent='V1',
            consentCohort=ParticipantCohort.COHORT_1,
            retentionEligibleStatus=None,
            wasEhrDataAvailable=False
        )
        # The participant summary should be updated with the new gender identity, but nothing else
        # changes.
        self.assertEqual(expected_ps3.asdict(), self.participant_summary_dao.get(1).asdict())
def _get_questionnaire_response_with_consents(self, *consent_paths):
self.insert_codes()
questionnaire = self._setup_questionnaire()
participant = Participant(participantId=1, biobankId=2)
self.participant_dao.insert(participant)
resource = test_data.load_questionnaire_response_with_consents(
questionnaire.questionnaireId,
participant.participantId,
self.FN_QUESTION.linkId,
self.LN_QUESTION.linkId,
self.EMAIL_QUESTION.linkId,
consent_paths,
)
# We need to remove the unused answers in the resource so we don't trigger an unused
# link id exception.
res_len = len(resource["group"]["question"]) - 1
for idx in range(res_len, -1, -1):
if resource["group"]["question"][idx]["linkId"].isdigit() is True:
del resource["group"]["question"][idx]
questionnaire_response = self.questionnaire_response_dao.from_client_json(resource, participant.participantId)
return questionnaire_response
@mock.patch("rdr_service.dao.questionnaire_response_dao._raise_if_gcloud_file_missing")
def test_consent_pdf_valid_leading_slash(self, mock_gcloud_check):
consent_pdf_path = "/Participant/xyz/consent.pdf"
questionnaire_response = self._get_questionnaire_response_with_consents(consent_pdf_path)
# This should pass validation (not raise exceptions).
self.questionnaire_response_dao.insert(questionnaire_response)
mock_gcloud_check.assert_called_with("/%s%s" % (_FAKE_BUCKET['example'], consent_pdf_path))
@mock.patch("rdr_service.dao.questionnaire_response_dao._raise_if_gcloud_file_missing")
def test_consent_pdf_valid_no_leading_slash(self, mock_gcloud_check):
    """A consent path without a leading slash gets one inserted between bucket and path."""
    pdf_path = "Participant/xyz/consent.pdf"
    response = self._get_questionnaire_response_with_consents(pdf_path)
    # Insert should validate the consent file without raising.
    self.questionnaire_response_dao.insert(response)
    mock_gcloud_check.assert_called_with("/" + _FAKE_BUCKET['example'] + "/" + pdf_path)
@mock.patch("rdr_service.dao.questionnaire_response_dao._raise_if_gcloud_file_missing")
def test_consent_pdf_file_invalid(self, mock_gcloud_check):
    """Insert propagates BadRequest when the cloud-file existence check fails."""
    mock_gcloud_check.side_effect = BadRequest("Test should raise this.")
    response = self._get_questionnaire_response_with_consents("/nobucket/no/file.pdf")
    with self.assertRaises(BadRequest):
        self.questionnaire_response_dao.insert(response)
@mock.patch("rdr_service.dao.questionnaire_response_dao._raise_if_gcloud_file_missing")
def test_consent_pdf_checks_multiple_extensions(self, mock_gcloud_check):
    """Every consent-PDF extension on the response gets its own cloud-file check."""
    response = self._get_questionnaire_response_with_consents(
        "/Participant/one.pdf", "/Participant/two.pdf"
    )
    self.questionnaire_response_dao.insert(response)
    self.assertEqual(mock_gcloud_check.call_count, 2)
def test_loading_response_collections(self):
    """Responses to a survey load grouped per participant, ordered by authored time."""
    # One questionnaire with two questions, answered twice by the same participant.
    questionnaire = self._generate_questionnaire(
        survey_code='test_survey',
        question_codes=['t_1', 'T_2']
    )
    participant_id = self.data_generator.create_database_participant().participantId
    first_authored = datetime.datetime(2021, 10, 1)
    second_authored = datetime.datetime(2022, 3, 5)
    self._generate_response(
        questionnaire,
        ['one', 'two'],
        participant_id=participant_id,
        authored_date=first_authored,
        created_date=first_authored
    )
    self._generate_response(
        questionnaire,
        ['nine', 'ten'],
        participant_id=participant_id,
        authored_date=second_authored,
        created_date=second_authored
    )

    participant_responses_map = QuestionnaireResponseDao.get_responses_to_surveys(
        survey_codes=['test_survey'],
        participant_ids=[participant_id],
        session=self.session
    )

    # Answer keys come back lower-cased ('t_2' for the 'T_2' question code).
    responses = participant_responses_map[participant_id]
    expected_first = {
        't_1': [Answer(id=mock.ANY, value='one')],
        't_2': [Answer(id=mock.ANY, value='two')]
    }
    expected_second = {
        't_1': [Answer(id=mock.ANY, value='nine')],
        't_2': [Answer(id=mock.ANY, value='ten')]
    }
    self.assertEqual(expected_first, responses.in_authored_order[0].answered_codes)
    self.assertEqual(expected_second, responses.in_authored_order[1].answered_codes)
def _generate_response(self, questionnaire, answers, participant_id=None, authored_date=None, created_date=None):
    """Insert a COMPLETED response to *questionnaire*, answering its questions in order with *answers*."""
    response = self.data_generator.create_database_questionnaire_response(
        questionnaireId=questionnaire.questionnaireId,
        questionnaireVersion=questionnaire.version,
        participantId=participant_id,
        authored=authored_date,
        created=created_date,
        status=QuestionnaireResponseStatus.COMPLETED
    )
    # The nth answer string answers the nth questionnaire question.
    for position, value_string in enumerate(answers):
        question = questionnaire.questions[position]
        self.data_generator.create_database_questionnaire_response_answer(
            questionnaireResponseId=response.questionnaireResponseId,
            questionId=question.questionnaireQuestionId,
            valueString=value_string
        )
    return response
def _generate_questionnaire(self, survey_code, question_codes):
    """Create a questionnaire history with one question per code, tagged with *survey_code*."""
    questionnaire = self.data_generator.create_database_questionnaire_history()
    for question_code in question_codes:
        code = self.data_generator.create_database_code(value=question_code)
        self.data_generator.create_database_questionnaire_question(
            questionnaireId=questionnaire.questionnaireId,
            questionnaireVersion=questionnaire.version,
            code=code
        )
    # The concept links the questionnaire to the survey's module code.
    survey_code_obj = self.data_generator.create_database_code(value=survey_code)
    self.data_generator.create_database_questionnaire_concept(
        questionnaireId=questionnaire.questionnaireId,
        questionnaireVersion=questionnaire.version,
        codeId=survey_code_obj.codeId
    )
    return questionnaire
class QuestionnaireResponseDaoCloudCheckTest(BaseTestCase):
    """Exercises _raise_if_gcloud_file_missing against the fake cloud storage (no database)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # These tests only touch (fake) cloud storage.
        self.uses_database = False

    def test_file_exists(self):
        pdf_path = "/%s/Participant/somefile.pdf" % _FAKE_BUCKET['example']
        # Before the file exists the check must raise.
        with self.assertRaises(BadRequest):
            _raise_if_gcloud_file_missing(pdf_path)
        # Once written, the same check passes silently.
        with open_cloud_file(pdf_path, 'w') as cloud_file:
            cloud_file.write("I am a fake PDF in a fake Cloud.")
        _raise_if_gcloud_file_missing(pdf_path)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Generic return type used by the ClsType callback alias below.
T = TypeVar('T')
# Alias for arbitrary JSON-compatible request bodies.
JSONType = Any
# Optional callback invoked with (pipeline response, deserialized body,
# response headers); lets callers substitute their own return type.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Shared serializer for URL/query/header parameter formatting in the
# build_*_request helpers; client-side validation is turned off here.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    replication_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for fetching a single replication."""
    api_version = "2020-11-01-preview"
    accept = "application/json"

    # Resolve the URL template against the serialized (and validated) path values.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        registryName=_SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        replicationName=_SERIALIZER.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    )

    # Merge the api-version into any caller-supplied query parameters.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_create_request_initial(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    replication_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that starts a replication create operation."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2020-11-01-preview"
    accept = "application/json"

    # Resolve the URL template against the serialized (and validated) path values.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        registryName=_SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        replicationName=_SERIALIZER.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    )

    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Content-Type is only emitted when a body content type was supplied.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request_initial(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    replication_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request that starts a replication delete operation."""
    api_version = "2020-11-01-preview"

    # Resolve the URL template against the serialized (and validated) path values.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        registryName=_SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        replicationName=_SERIALIZER.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    )

    # DELETE sends no Accept header; only the api-version query parameter.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=query_parameters,
        **kwargs
    )
def build_update_request_initial(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    replication_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PATCH request that starts a replication update operation."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2020-11-01-preview"
    accept = "application/json"

    # Resolve the URL template against the serialized (and validated) path values.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        registryName=_SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        replicationName=_SERIALIZER.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    )

    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Content-Type is only emitted when a body content type was supplied.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_list_request(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists all replications of a registry."""
    api_version = "2020-11-01-preview"
    accept = "application/json"

    # Resolve the URL template against the serialized (and validated) path values.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        registryName=_SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    )

    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
# NOTE: AutoRest-generated operation group (see module header) -- code left
# unchanged; comments added for readability only.
class ReplicationsOperations(object):
    """ReplicationsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.containerregistry.v2020_11_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client plus the (de)serializers shared by all operations
        # in this group; injected by the generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        registry_name: str,
        replication_name: str,
        **kwargs: Any
    ) -> "_models.Replication":
        """Gets the properties of the specified replication.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param replication_name: The name of the replication.
        :type replication_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Replication, or the result of cls(response)
        :rtype: ~azure.mgmt.containerregistry.v2020_11_01_preview.models.Replication
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Replication"]
        # Default ARM error mapping; callers may extend it via 'error_map'.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            replication_name=replication_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 is a success for GET; anything else raises.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Replication', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'}  # type: ignore

    def _create_initial(
        self,
        resource_group_name: str,
        registry_name: str,
        replication_name: str,
        replication: "_models.Replication",
        **kwargs: Any
    ) -> "_models.Replication":
        # Initial PUT of the create long-running operation; begin_create polls
        # on top of this.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Replication"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(replication, 'Replication')

        request = build_create_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            replication_name=replication_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 (updated) and 201 (created) carry a Replication body.
        if response.status_code == 200:
            deserialized = self._deserialize('Replication', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('Replication', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'}  # type: ignore

    @distributed_trace
    def begin_create(
        self,
        resource_group_name: str,
        registry_name: str,
        replication_name: str,
        replication: "_models.Replication",
        **kwargs: Any
    ) -> LROPoller["_models.Replication"]:
        """Creates a replication for a container registry with the specified parameters.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param replication_name: The name of the replication.
        :type replication_name: str
        :param replication: The parameters for creating a replication.
        :type replication: ~azure.mgmt.containerregistry.v2020_11_01_preview.models.Replication
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Replication or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.containerregistry.v2020_11_01_preview.models.Replication]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Replication"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved state.
        if cont_token is None:
            raw_result = self._create_initial(
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                replication_name=replication_name,
                replication=replication,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into a Replication model.
            response = pipeline_response.http_response
            deserialized = self._deserialize('Replication', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Select the polling strategy: default ARM polling, none, or custom.
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'}  # type: ignore

    def _delete_initial(
        self,
        resource_group_name: str,
        registry_name: str,
        replication_name: str,
        **kwargs: Any
    ) -> None:
        # Initial DELETE of the delete long-running operation; begin_delete
        # polls on top of this. Returns no body.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            replication_name=replication_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'}  # type: ignore

    @distributed_trace
    def begin_delete(
        self,
        resource_group_name: str,
        registry_name: str,
        replication_name: str,
        **kwargs: Any
    ) -> LROPoller[None]:
        """Deletes a replication from a container registry.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param replication_name: The name of the replication.
        :type replication_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved state.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                replication_name=replication_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Delete has no response body; only the optional cls hook runs.
            if cls:
                return cls(pipeline_response, None, {})

        # Select the polling strategy: default ARM polling, none, or custom.
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'}  # type: ignore

    def _update_initial(
        self,
        resource_group_name: str,
        registry_name: str,
        replication_name: str,
        replication_update_parameters: "_models.ReplicationUpdateParameters",
        **kwargs: Any
    ) -> "_models.Replication":
        # Initial PATCH of the update long-running operation; begin_update
        # polls on top of this.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Replication"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(replication_update_parameters, 'ReplicationUpdateParameters')

        request = build_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            replication_name=replication_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 and 201 carry a Replication body.
        if response.status_code == 200:
            deserialized = self._deserialize('Replication', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('Replication', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'}  # type: ignore

    @distributed_trace
    def begin_update(
        self,
        resource_group_name: str,
        registry_name: str,
        replication_name: str,
        replication_update_parameters: "_models.ReplicationUpdateParameters",
        **kwargs: Any
    ) -> LROPoller["_models.Replication"]:
        """Updates a replication for a container registry with the specified parameters.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param replication_name: The name of the replication.
        :type replication_name: str
        :param replication_update_parameters: The parameters for updating a replication.
        :type replication_update_parameters:
         ~azure.mgmt.containerregistry.v2020_11_01_preview.models.ReplicationUpdateParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Replication or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.containerregistry.v2020_11_01_preview.models.Replication]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Replication"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved state.
        if cont_token is None:
            raw_result = self._update_initial(
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                replication_name=replication_name,
                replication_update_parameters=replication_update_parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into a Replication model.
            response = pipeline_response.http_response
            deserialized = self._deserialize('Replication', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Select the polling strategy: default ARM polling, none, or custom.
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'}  # type: ignore

    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        registry_name: str,
        **kwargs: Any
    ) -> Iterable["_models.ReplicationListResult"]:
        """Lists all the replications for the specified container registry.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ReplicationListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.containerregistry.v2020_11_01_preview.models.ReplicationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ReplicationListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the operation's URL template; subsequent pages
            # reuse the service-provided next_link as the template.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    registry_name=registry_name,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    registry_name=registry_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Pull one page's items and the link to the next page.
            deserialized = self._deserialize("ReplicationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications'}  # type: ignore
|
|
# http://pythonhosted.org/dxfgrabber/#dxfgrabber
import pcbnew
import dxfgrabber
import re
import sys, os.path, inspect
import numpy as np
oldpath = sys.path
# inspect.stack()[0][1] is the full path to the current file.
# Temporarily put this file's directory on sys.path so the sibling helper
# modules (bulge, pcbpoint) can be imported from within pcbnew's scripting
# console, then restore the original path.
sys.path.insert(0, os.path.dirname(inspect.stack()[0][1]))
import bulge
import pcbpoint
sys.path = oldpath

# the internal coordinate space of pcbnew is 10E-6 mm. (a millionth of a mm)
# the coordinate 121550000 corresponds to 121.550000
SCALE = 1000000.0

# Map pcbnew type ids (PCB_*_T constants) back to their names, for debug output.
type_table = {}
for t in filter(lambda t: re.match("PCB_.*_T", t), dir(pcbnew)):
    type_table[getattr(pcbnew, t)] = t

# Map pcbnew stroke-shape ids (S_* constants) back to their names.
shape_table = {}
for s in filter(lambda s: re.match("S_.*", s), dir(pcbnew)):
    shape_table[getattr(pcbnew, s)] = s

# generate a name->layer table so we can lookup layer numbers by name.
layertable = {}
numlayers = pcbnew.PCB_LAYER_ID_COUNT
for i in range(numlayers):
    layertable[pcbnew.GetBoard().GetLayerName(i)] = i
def print_current_graphics():
    """Debug helper: print the stroke shape of every line drawing on the board.

    NOTE(review): reads the module-level global ``board``, which is only
    assigned near the bottom of this file — call this after that assignment.
    """
    # to get information about current graphics
    for d in board.GetDrawings():
        if (d.Type() == pcbnew.PCB_LINE_T):
            # this type is DRAWSEGMENT in pcbnew/class_drawsegment.h
            # the different shape types are defined in class_board_item.h enum STROKE_T
            print("line shape {}".format(shape_table[d.GetShape()]))

# this is sample code for adding a polygon. The downside of polygons is they are filled.
# bummer
# the internal coordinate space of pcbnew is 10E-6 mm. (a millionth of a mm)
# the coordinate 121550000 corresponds to 121.550000
# SCALE = 1000000.0
# seg = pcbnew.DRAWSEGMENT(board)
# seg.SetLayer(44)
# seg.SetShape(pcbnew.S_POLYGON)
# sps = seg.GetPolyShape()
# o = sps.NewOutline()
# sps.Append(int(10.0*SCALE),int(10.0*SCALE), o)
# sps.Append(int(10.0*SCALE),int(20.0*SCALE), o)
# sps.Append(int(20.0*SCALE),int(20.0*SCALE), o)
# sps.Append(int(20.0*SCALE),int(10.0*SCALE), o)
# board.Add(seg)
def dxfarc2pcbarc(board, layer, center, radius, startangle, endangle):
    """Add a pcbnew arc drawing segment equivalent to a DXF arc.

    DXF arcs are specified by center, radius and start/end angles (degrees,
    counter-clockwise). pcbnew arcs are specified by center, a start point and
    a signed sweep angle, so the start point is reconstructed here from the
    DXF parameters.

    Args:
        board: pcbnew BOARD the segment is added to.
        layer: integer pcbnew layer id.
        center: pcbpoint of the arc center.
        radius: arc radius (same units as pcbpoint).
        startangle: DXF start angle in degrees.
        endangle: DXF end angle in degrees.
    """
    seg = pcbnew.DRAWSEGMENT(board)
    seg.SetLayer(layer)
    seg.SetShape(pcbnew.S_ARC)
    seg.SetCenter(center.wxpoint())
    # Start point reconstructed from the DXF polar form.
    seg.SetArcStart((center + pcbpoint.pcbpoint(radius*np.cos(np.deg2rad(startangle)),
                                                radius*np.sin(np.deg2rad(startangle)))).wxpoint())
    # pcbnew's y axis points down (mirrored over x), so the sweep is negated.
    # SetAngle() takes tenths of a degree, hence the *10.
    # (A dead `sa, ea = (min(...), max(...))` computation was removed here —
    # its results were never used.)
    seg.SetAngle((-endangle+startangle)*10)
    board.Add(seg)
def dxf_to_graphic(board, layer, filepath, singlepoly=False):
    """Import the entities of a DXF file as graphic drawings on a board layer.

    Handles LINE, ARC and LWPOLYLINE entities (CIRCLEs are only printed, not
    imported). LWPOLYLINEs are either imported as one filled S_POLYGON
    (``singlepoly=True``, bulges ignored) or as individual segments/arcs,
    honouring each vertex's bulge value.

    Args:
        board: pcbnew BOARD to draw on.
        layer: integer pcbnew layer id.
        filepath: path of the DXF file to read.
        singlepoly: if True, render each LWPOLYLINE as a single filled polygon.
    """
    dxf = dxfgrabber.readfile(filepath)
    layer_count = len(dxf.layers) # collection of layer definitions
    block_definition_count = len(dxf.blocks) # dict like collection of block definitions
    entity_count = len(dxf.entities) # list like collection of entities
    print("layers: {}".format(layer_count))
    print("blocks: {}".format(block_definition_count))
    print("entities:{}".format(entity_count))
    for e in dxf.entities.get_entities():
        if (e.dxftype == "LINE"):
            seg = pcbnew.DRAWSEGMENT(board)
            seg.SetLayer(layer)
            seg.SetShape(pcbnew.S_SEGMENT)
            seg.SetStart(pcbpoint.pcbpoint(e.start).wxpoint())
            seg.SetEnd(pcbpoint.pcbpoint(e.end).wxpoint())
            board.Add(seg)
        if (e.dxftype == "CIRCLE"):
            # NOTE(review): circles are only reported, never drawn.
            print("center {} radius {}".format(e.center, e.radius))
        if (e.dxftype == "ARC"):
            # dxf arcs are different from pcbnew arcs
            # dxf arcs have a center point, radius and start/stop angles
            # pcbnew arcs have a center pointer, radius, and a start point,
            # angle (counter clockwise)
            dxfarc2pcbarc(board, layer,
                          pcbpoint.pcbpoint(e.center),
                          e.radius, e.start_angle, e.end_angle)
        if (e.dxftype == "LWPOLYLINE"):
            if (singlepoly):
                # One filled polygon outline from the vertex list; bulge
                # (arc) information is dropped in this mode.
                seg = pcbnew.DRAWSEGMENT(board)
                seg.SetLayer(layer)
                seg.SetShape(pcbnew.S_POLYGON)
                board.Add(seg)
                sps = seg.GetPolyShape()
                o = sps.NewOutline()
                for pt in e.points:
                    ppt = pcbpoint.pcbpoint(pt).wxpoint()
                    sps.Append(ppt.x, ppt.y)
            else:
                # Walk vertex pairs; start from the last point so a closed
                # polyline's final edge is emitted too. Each vertex's bulge
                # applies to the edge leaving it: 0 -> straight segment,
                # otherwise convert the bulge to a circular arc.
                prevpt = e.points[-1]
                curbulge = e.bulge[-1]
                for pt, nextbulge in zip(e.points, e.bulge):
                    # y is minus because y increases going down the canvas
                    if (curbulge == 0.0):
                        seg = pcbnew.DRAWSEGMENT(board)
                        seg.SetLayer(layer)
                        seg.SetShape(pcbnew.S_SEGMENT)
                        seg.SetStart(pcbpoint.pcbpoint(prevpt).wxpoint())
                        seg.SetEnd(pcbpoint.pcbpoint(pt).wxpoint())
                        board.Add(seg)
                    else:
                        center, startangle, endangle, radius = bulge.bulge2arc(prevpt, pt, curbulge)
                        dxfarc2pcbarc(board, layer,
                                      pcbpoint.pcbpoint(center),
                                      radius, startangle, endangle)
                    prevpt = pt
                    curbulge = nextbulge
    pcbnew.Refresh()
def dxf_to_mountholes(board, footprint_mapping, filepath):
    """Place a mounting-hole footprint at every CIRCLE entity of a DXF file.

    The circle's diameter (as a string, e.g. "3.0") selects the footprint via
    ``footprint_mapping``, which maps diameter -> (library path, footprint
    name). Raises ValueError for a diameter with no mapping entry.
    """
    dxf = dxfgrabber.readfile(filepath)
    io = pcbnew.PCB_IO()
    for entity in dxf.entities.get_entities():
        if entity.dxftype != "CIRCLE":
            continue
        print("center {} radius {}".format(entity.center, entity.radius))
        diameter = str(entity.radius*2)
        if diameter not in footprint_mapping:
            raise ValueError("diameter {} not found in footprint mapping".format(diameter))
        lib_path, fp_name = footprint_mapping[diameter]
        module = io.FootprintLoad(lib_path, fp_name)
        module.SetPosition(pcbpoint.pcbpoint(entity.center).wxpoint())
        board.Add(module)
    pcbnew.Refresh()
# Script entry: import several DXF files onto specific layers of the currently
# open board. NOTE(review): the DXF paths and footprint library path below are
# hard-coded to the original author's machine; adjust before running.
board = pcbnew.GetBoard()
dxf_to_graphic(board, layertable['Cmts.User'],
               "/bubba/electronicsDS/fusion/leds_projection.dxf", True)
dxf_to_graphic(board, layertable['Edge.Cuts'],
               "/bubba/electronicsDS/fusion/boundary_polyline.dxf")
footprint_lib = '/home/mmccoo/kicad/kicad-footprints/MountingHole.pretty'
# diameter string -> (footprint library, footprint name)
footprint_mapping = {
    "3.0": (footprint_lib, "MountingHole_3.2mm_M3")
}
dxf_to_mountholes(board, footprint_mapping, "/bubba/electronicsDS/fusion/mountingholes.dxf")
dxf_to_graphic(board, layertable['Eco1.User'],
               "/bubba/electronicsDS/fusion/powerrails.dxf")
#traverse_dxf("/bubba/electronicsDS/fusion/powerrails.dxf", graphic_actions)
|
|
#!/usr/bin/env python
#
# Copyright 2016 Qumulo, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
import json
import math
import os
import sys
import logging
import time
import xml.dom.minidom
# qumulo_client wraps all of the Qumulo REST API interactions
from qumulo_client import QumuloClient
from qumulo.lib.request import RequestError
from splunklib.modularinput import *
import splunklib.client
SPLUNK_HOME = os.environ.get("SPLUNK_HOME")
STANZA = None
EGG_DIR = os.path.join(SPLUNK_HOME, "etc", "apps", "qumulo_splunk_app", "bin")
# Import any Eggs: put each .egg archive in the app's bin directory on sys.path.
for filename in os.listdir(EGG_DIR):
    if filename.endswith(".egg"):
        # BUG FIX: was `EGG_DIR + filename`, which concatenates without a path
        # separator ("...binfoo.egg") and produced paths that never resolved.
        sys.path.append(os.path.join(EGG_DIR, filename))
class QumuloScript(Script):
    """All modular inputs should inherit from the abstract base class Script
    from splunklib.modularinput.script.
    They must override the get_scheme and stream_events functions, and,
    if the scheme returned by get_scheme has Scheme.use_external_validation
    set to True, the validate_input function.

    NOTE(review): this module uses Python 2 syntax throughout
    (``except X, e``, ``dict.iteritems``, ``long``).
    """

    def get_scheme(self):
        """When Splunk starts, it looks for all the modular inputs defined by
        its configuration, and tries to run them with the argument --scheme.
        Splunkd expects the modular inputs to print a description of the
        input in XML on stdout. The modular input framework takes care of all
        the details of formatting XML and printing it. The user need only
        override get_scheme and return a new Scheme object.

        :return: scheme, a Scheme object
        """
        # The title Splunk will display to users for this input.
        scheme = Scheme("Qumulo Splunk App")
        scheme.description = "Manage Qumulo Clusters using Splunk."
        # If you set external validation to True, without overriding validate_input,
        # the script will accept anything as valid. Generally you only need external
        # validation if there are relationships you must maintain among the
        # parameters, such as requiring min to be less than max in this example,
        # or you need to check that some resource is reachable or valid.
        # Otherwise, Splunk lets you specify a validation string for each argument
        # and will run validation internally using that string.
        scheme.use_external_validation = True
        scheme.use_single_instance = False

        # Credentials used against the Qumulo REST API.
        username_argument = Argument("username")
        username_argument.title = "Username"
        username_argument.data_type = Argument.data_type_string
        username_argument.description = "Username for authentication"
        username_argument.required_on_create = True
        username_argument.required_on_edit = False
        # If you are not using external validation, you would add something like:
        #
        # scheme.validation = "min > 0"
        scheme.add_argument(username_argument)

        password_argument = Argument("password")
        password_argument.title = "Password"
        password_argument.data_type = Argument.data_type_string
        password_argument.description = "Password for authentication"
        password_argument.required_on_create = True
        password_argument.required_on_edit = False
        scheme.add_argument(password_argument)

        port_argument = Argument("port")
        port_argument.title = "Port"
        port_argument.data_type = Argument.data_type_number
        port_argument.description = "Port number for Cluster API access, defaults to 8000"
        port_argument.required_on_create = True
        port_argument.required_on_edit = False
        scheme.add_argument(port_argument)

        nodehost_argument = Argument("nodehost")
        nodehost_argument.title = "Host"
        nodehost_argument.data_type = Argument.data_type_string
        nodehost_argument.description = "Cluster hostname or IP address"
        nodehost_argument.required_on_create = True
        nodehost_argument.required_on_edit = False
        scheme.add_argument(nodehost_argument)

        # Which REST endpoint this input instance polls: one of
        # "throughput", "capacity" or "iops" (see stream_events).
        endpoint_to_poll_argument = Argument("endpoint_to_poll")
        endpoint_to_poll_argument.title = "Endpoint to poll"
        endpoint_to_poll_argument.data_type = Argument.data_type_string
        endpoint_to_poll_argument.description = "Name of endpoint to poll"
        endpoint_to_poll_argument.required_on_create = True
        endpoint_to_poll_argument.required_on_edit = False
        scheme.add_argument(endpoint_to_poll_argument)

        return scheme

    def validate_input(self, validation_definition):
        """In this example we are using external validation to verify that min is
        less than max. If validate_input does not raise an Exception, the input is
        assumed to be valid. Otherwise it prints the exception as an error message
        when telling splunkd that the configuration is invalid.

        When using external validation, after splunkd calls the modular input with
        --scheme to get a scheme, it calls it again with --validate-arguments for
        each instance of the modular input in its configuration files, feeding XML
        on stdin to the modular input to do validation. It is called the same way
        whenever a modular input's configuration is edited.

        NOTE(review): currently a no-op — every configuration is accepted.

        :param validation_definition: a ValidationDefinition object
        """
        # Get the parameters from the ValidationDefinition object,
        # then typecast the values as floats
        pass

    def get_credentials(self, sessionKey):
        """Given a session key, get the creds for the user

        NOTE(review): the lookup is commented out, so this always raises;
        stream_events obtains credentials via storage_passwords instead.
        """
        myapp = 'qumulo_splunk_app'
        # try:
        #     # list all credentials
        #     entities = entity.getEntities(['admin', 'passwords'], namespace=myapp,
        #                                   owner='nobody', sessionKey=sessionKey)
        # except Exception, e:
        #     raise Exception("Could not get %s credentials from splunk. Error: %s"
        #                     % (myapp, str(e)))
        # # return first set of credentials
        # for i, c in entities.items():
        #     logging.error("entities.items: %s" % str(json.dumps(c)))
        #     return c
        # # return c['username'], c['password']
        raise Exception("No credentials have been found")

    def stream_events(self, inputs, ew):
        """This function handles all the action: splunk calls this modular input
        without arguments, streams XML describing the inputs to stdin, and waits
        for XML on stdout describing events.

        If you set use_single_instance to True on the scheme in get_scheme, it
        will pass all the instances of this input to a single instance of this
        script.

        :param inputs: an InputDefinition object
        :param ew: an EventWriter object
        """
        app_config = dict()

        try:
            # Credentials come from Splunk's encrypted storage/passwords store,
            # not from the input stanza.
            service = splunklib.client.connect(token=inputs.metadata["session_key"],
                                               app='qumulo_splunk_app', owner="nobody")
            for password in service.storage_passwords:
                app_config["username"] = password.username
                app_config["password"] = password.clear_password
        except Exception, excpt:
            logging.error("Unable to get storage_passwords: {}".format(excpt))
            return

        # BUGBUG: password.name from service.storage_passwords may have colon delimiters returned in
        # the field value.  Sanitize if so.
        # TODO: Figure out why these are here and determine the prescribed way to obviate the problem.

        for input_name, input_item in inputs.inputs.iteritems():
            # logging.error("In stream_events, input_item is : %s" % json.dumps(input_item))
            if "nodehost" in input_item:
                app_config["nodehost"] = input_item["nodehost"]
            if "port" in input_item:
                app_config["port"] = input_item["port"]
            if "endpoint_to_poll" in input_item:
                endpoint_to_poll = input_item["endpoint_to_poll"]

            client = QumuloClient(app_config)

            # Dispatch on the configured endpoint. NOTE(review): the return
            # values are unused — the process_* methods write events directly.
            if endpoint_to_poll == "throughput":
                result = self.process_throughput(ew, input_name, client)
            elif endpoint_to_poll == "capacity":
                result = self.process_capacity(ew, input_name, client)
            elif endpoint_to_poll == "iops":
                result = self.process_iops(ew, input_name, client)

    def process_iops(self, ew, input_name, client):
        """Fetch current IOPS entries and emit one Splunk event per entry."""
        try:
            iops = client.get_iops()
            for op in iops:
                # print_xml_stream(json.dumps(op))
                # Create an Event object, and set its data fields
                event = Event()
                event.stanza = input_name
                event.data = json.dumps(op)
                ew.write_event(event)
        except RequestError, excpt:
            logging.error("Exception performing request for IOPS: %s" % str(excpt))
            return

    def process_capacity(self, ew, input_name, client):
        """Fetch cluster capacity, convert byte counts to GiB, emit one event."""
        try:
            capacity = client.get_capacity()
        except RequestError, excpt:
            logging.error("Exception performing request for Capacity: %s" % str(excpt))
            return

        cap = {}
        # Byte counts arrive as strings; convert to whole GiB (1024^3 bytes).
        cap["free_gigabytes"] = int(long(float(capacity['free_size_bytes']))/math.pow(1024,3))
        cap["raw_gigabytes"] = int(long(float(capacity['raw_size_bytes']))/math.pow(1024,3))
        cap["total_gigabytes"] = int(long(float(capacity['total_size_bytes']))/math.pow(1024,3))

        # print_xml_stream(json.dumps(cap))
        event = Event()
        event.stanza = input_name
        event.data = json.dumps(cap)
        ew.write_event(event)

    def process_throughput(self, ew, input_name, client):
        """Fetch throughput time series and emit one event per sample whose
        series id contains "throughput"."""
        try:
            throughput = client.get_throughput()
        except RequestError, excpt:
            logging.error("Exception performing request for Throughput: %s" % str(excpt))
            return

        for entry in throughput:
            for i in range(len(entry['values'])):
                log_entry = {}
                if "throughput" in entry['id']:
                    log_entry['metric'] = entry['id']
                    log_entry['time'] = entry['times'][i]
                    log_entry['value'] = entry['values'][i]
                    # print_xml_stream(json.dumps(log_entry))
                    event = Event()
                    event.stanza = input_name
                    event.data = json.dumps(log_entry)
                    ew.write_event(event)
if __name__ == "__main__":
    # read session key sent from splunkd
    # NOTE(review): sessionKey is read but unused here; stream_events gets the
    # session key from inputs.metadata instead.
    sessionKey = sys.stdin.readline().strip()
    # username, password = getCredentials(sessionKey)
    sys.exit(QumuloScript().run(sys.argv))
|
|
import numpy as np
from mpi4py import MPI
from pmesh.pm import ParticleMesh
from pySDC.core.Errors import ParameterError, ProblemError
from pySDC.core.Problem import ptype
from pySDC.playgrounds.pmesh.PMESH_datatype_NEW import pmesh_datatype, rhs_imex_pmesh
class allencahn_imex(ptype):
    """
    Example implementing Allen-Cahn equation in 2-3D using PMESH for solving linear parts, IMEX time-stepping

    PMESH: https://github.com/rainwoodman/pmesh

    NOTE(review): this class keeps the solution in the *spectral* domain —
    ``u.values`` hold complex Fourier coefficients (see u_exact/eval_f).

    Attributes:
        xvalues: grid points in space
        dx: mesh width
    """

    def __init__(self, problem_params, dtype_u=pmesh_datatype, dtype_f=rhs_imex_pmesh):
        """
        Initialization routine

        Args:
            problem_params (dict): custom parameters for the example
            dtype_u: pmesh data type (will be passed to parent class)
            dtype_f: pmesh data type with implicit and explicit parts (will be passed to parent class)
        """
        # fill in defaults for optional parameters before checking essentials
        if 'L' not in problem_params:
            problem_params['L'] = 1.0
        if 'init_type' not in problem_params:
            problem_params['init_type'] = 'circle'
        if 'comm' not in problem_params:
            problem_params['comm'] = None
        if 'dw' not in problem_params:
            problem_params['dw'] = 0.0

        # these parameters will be used later, so assert their existence
        essential_keys = ['nvars', 'eps', 'L', 'radius', 'dw']
        for key in essential_keys:
            if key not in problem_params:
                msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys()))
                raise ParameterError(msg)

        if not (isinstance(problem_params['nvars'], tuple) and len(problem_params['nvars']) > 1):
            raise ProblemError('Need at least two dimensions')

        # Creating ParticleMesh structure
        self.pm = ParticleMesh(BoxSize=problem_params['L'], Nmesh=list(problem_params['nvars']), dtype='f8',
                               plan_method='measure', comm=problem_params['comm'])

        # create test RealField to get the local dimensions (there's probably a better way to do that)
        tmp = self.pm.create(type='real')
        tmps = tmp.r2c()

        # invoke super init, passing the communicator and the local dimensions as init
        super(allencahn_imex, self).__init__(init=(self.pm.comm, tmps.value.shape), dtype_u=dtype_u, dtype_f=dtype_f,
                                             params=problem_params)

        # Need this for diagnostics
        self.dx = self.params.L / problem_params['nvars'][0]
        self.dy = self.params.L / problem_params['nvars'][1]
        self.xvalues = [i * self.dx - problem_params['L'] / 2 for i in range(problem_params['nvars'][0])]
        self.yvalues = [i * self.dy - problem_params['L'] / 2 for i in range(problem_params['nvars'][1])]

    def eval_f(self, u, t):
        """
        Routine to evaluate the RHS

        Args:
            u (dtype_u): current values (spectral coefficients)
            t (float): current time

        Returns:
            dtype_f: the RHS
        """

        def Laplacian(k, v):
            # multiply by -|k|^2: Laplacian in Fourier space
            k2 = sum(ki ** 2 for ki in k)
            return -k2 * v

        f = self.dtype_f(self.init)
        tmp_u = self.pm.create(type='complex', value=u.values)
        # implicit (linear) part: Laplacian applied directly in spectral space
        f.impl.values = tmp_u.apply(Laplacian).value
        if self.params.eps > 0:
            # nonlinear part is evaluated pointwise in real space, then
            # transformed back to spectral coefficients
            tmp_u = tmp_u.c2r(out=Ellipsis)
            tmp_f = - 2.0 / self.params.eps ** 2 * tmp_u * (1.0 - tmp_u) * (1.0 - 2.0 * tmp_u) - \
                6.0 * self.params.dw * tmp_u * (1.0 - tmp_u)
            f.expl.values = tmp_f.r2c(out=Ellipsis).value
        return f

    def solve_system(self, rhs, factor, u0, t):
        """
        Simple FFT solver for the diffusion part

        Args:
            rhs (dtype_f): right-hand side for the linear system
            factor (float) : abbrev. for the node-to-node stepsize (or any other factor required)
            u0 (dtype_u): initial guess for the iterative solver (not used here so far)
            t (float): current time (e.g. for time-dependent BCs)

        Returns:
            dtype_u: solution as mesh
        """

        def linear_solve(k, v):
            # (I - factor * Laplacian)^-1 is diagonal in Fourier space
            k2 = sum(ki ** 2 for ki in k)
            return 1.0 / (1.0 + factor * k2) * v

        me = self.dtype_u(self.init)
        tmp_rhs = self.pm.create(type='complex', value=rhs.values)
        me.values = tmp_rhs.apply(linear_solve, out=Ellipsis).value
        return me

    def u_exact(self, t):
        """
        Routine to compute the exact solution at time t (only t=0 supported)

        Args:
            t (float): current time

        Returns:
            dtype_u: exact solution
        """

        def circle(i, v):
            # tanh profile of a single circle/sphere of radius params.radius
            # centered in the box
            r = [ii * (Li / ni) - 0.5 * Li for ii, ni, Li in zip(i, v.Nmesh, v.BoxSize)]
            r2 = sum(ri ** 2 for ri in r)
            return 0.5 * (1.0 + np.tanh((self.params.radius - np.sqrt(r2)) / (np.sqrt(2) * self.params.eps)))

        def circle_rand(i, v):
            # many circles/spheres with reproducibly random radii, one per
            # half-integer grid cell of the box
            L = [int(l) for l in v.BoxSize]
            r = [ii * (Li / ni) - 0.5 * Li for ii, ni, Li in zip(i, v.Nmesh, L)]
            rshift = r.copy()
            ndim = len(r)
            data = 0
            # get random radii for circles/spheres
            np.random.seed(1)
            lbound = 3.0 * self.params.eps
            ubound = 0.5 - self.params.eps
            rand_radii = (ubound - lbound) * np.random.random_sample(size=tuple(L)) + lbound
            # distribute circles/spheres
            # NOTE(review): the loop variable i below shadows the function
            # argument i — intentional-looking but worth confirming.
            if ndim == 2:
                for indexi, i in enumerate(range(-L[0] + 1, L[0], 2)):
                    for indexj, j in enumerate(range(-L[1] + 1, L[1], 2)):
                        # shift x and y coordinate depending on which box we are in
                        rshift[0] = r[0] + i/2
                        rshift[1] = r[1] + j/2
                        # build radius
                        r2 = sum(ri ** 2 for ri in rshift)
                        # add this blob, shifted by 1 to avoid issues with adding up negative contributions
                        data += np.tanh((rand_radii[indexi, indexj] - np.sqrt(r2)) / (np.sqrt(2) * self.params.eps)) + 1
            # get rid of the 1
            data *= 0.5
            assert np.all(data <= 1.0)
            return data

        assert t == 0, 'ERROR: u_exact only valid for t=0'
        me = self.dtype_u(self.init)
        if self.params.init_type == 'circle':
            tmp_u = self.pm.create(type='real', value=0.0)
            tmp_u.apply(circle, kind='index', out=Ellipsis)
            me.values = tmp_u.r2c().value
        elif self.params.init_type == 'circle_rand':
            tmp_u = self.pm.create(type='real', value=0.0)
            tmp_u.apply(circle_rand, kind='index', out=Ellipsis)
            me.values = tmp_u.r2c().value
        else:
            raise NotImplementedError('type of initial value not implemented, got %s' % self.params.init_type)

        return me
class allencahn_imex_timeforcing(allencahn_imex):
    """
    Example implementing Allen-Cahn equation in 2-3D using PMESH for solving linear parts, IMEX time-stepping,
    time-dependent forcing

    The driving force dw is not a fixed parameter here; it is recomputed at
    every evaluation from global sums over the RHS and the force term.
    """

    def eval_f(self, u, t):
        """
        Routine to evaluate the RHS

        Args:
            u (dtype_u): current values (real-space field)
            t (float): current time

        Returns:
            dtype_f: the RHS
        """

        def Laplacian(k, v):
            # multiply by -|k|^2: Laplacian in Fourier space
            k2 = sum(ki ** 2 for ki in k)
            return -k2 * v

        f = self.dtype_f(self.init)
        tmp_u = self.pm.create(type='real', value=u.values)
        f.impl.values = tmp_u.r2c().apply(Laplacian, out=Ellipsis).c2r(out=Ellipsis).value

        if self.params.eps > 0:
            f.expl.values = - 2.0 / self.params.eps ** 2 * u.values * (1.0 - u.values) * (1.0 - 2.0 * u.values)

        # build sum over RHS without driving force
        Rt_local = f.impl.values.sum() + f.expl.values.sum()
        if self.pm.comm is not None:
            Rt_global = self.pm.comm.allreduce(sendobj=Rt_local, op=MPI.SUM)
        else:
            Rt_global = Rt_local

        # build sum over driving force term
        Ht_local = np.sum(6.0 * u.values * (1.0 - u.values))
        if self.pm.comm is not None:
            Ht_global = self.pm.comm.allreduce(sendobj=Ht_local, op=MPI.SUM)
        else:
            # BUG FIX: this branch previously assigned Rt_local, silently using
            # the wrong sum (and thus a wrong force dw) whenever no MPI
            # communicator is attached.
            Ht_global = Ht_local

        # add/subtract time-dependent driving force
        dw = Rt_global / Ht_global
        f.expl.values -= 6.0 * dw * u.values * (1.0 - u.values)

        return f
class allencahn_imex_stab(allencahn_imex):
    """
    Allen-Cahn equation in 2-3D using PMESH for the linear parts, IMEX
    time-stepping with a stabilized splitting: a 1/eps^2 shift is moved from
    the explicit into the implicit (diffusion) part.
    """

    def eval_f(self, u, t):
        """
        Evaluate the right-hand side.

        Args:
            u (dtype_u): current values (real-space field)
            t (float): current time

        Returns:
            dtype_f: the RHS, split into implicit and explicit parts
        """

        def shifted_laplacian(k, v):
            # Laplacian plus the 1/eps^2 stabilization shift, in Fourier space
            k2 = sum(ki ** 2 for ki in k) + 1.0 / self.params.eps ** 2
            return -k2 * v

        rhs = self.dtype_f(self.init)
        field = self.pm.create(type='real', value=u.values)
        rhs.impl.values = field.r2c().apply(shifted_laplacian, out=Ellipsis).c2r(out=Ellipsis).value

        if self.params.eps > 0:
            uv = u.values
            # nonlinearity plus driving force, with the stabilization shift
            # added back so the overall RHS is unchanged
            rhs.expl.values = (- 2.0 / self.params.eps ** 2 * uv * (1.0 - uv) * (1.0 - 2.0 * uv)
                               - 6.0 * self.params.dw * uv * (1.0 - uv)
                               + 1.0 / self.params.eps ** 2 * uv)

        return rhs

    def solve_system(self, rhs, factor, u0, t):
        """
        FFT-based solver for the (shifted) diffusion part.

        Args:
            rhs (dtype_f): right-hand side for the linear system
            factor (float): node-to-node stepsize (or any other factor required)
            u0 (dtype_u): initial guess for the iterative solver (unused here)
            t (float): current time (e.g. for time-dependent BCs)

        Returns:
            dtype_u: solution as mesh
        """

        def invert_diagonal(k, v):
            # (I - factor * (Laplacian - 1/eps^2))^-1 is diagonal in Fourier space
            k2 = sum(ki ** 2 for ki in k) + 1.0 / self.params.eps ** 2
            return 1.0 / (1.0 + factor * k2) * v

        solution = self.dtype_u(self.init)
        field = self.pm.create(type='real', value=rhs.values)
        solution.values = field.r2c().apply(invert_diagonal, out=Ellipsis).c2r(out=Ellipsis).value
        return solution
|
|
# -*- coding:utf-8 -*-
import datetime
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from django.core.urlresolvers import reverse
from django.conf import settings
from django_messages.models import Message
from django_messages.forms import ComposeForm
from django_messages.utils import format_quote
def inbox(request, template_name='django_messages/inbox.html'):
    """
    Displays a list of received messages for the current user.
    Optional Arguments:
        ``template_name``: name of the template to use.
    """
    received = Message.objects.inbox_for(request.user)
    context = {'message_list': received}
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
inbox = login_required(inbox)
def outbox(request, template_name='django_messages/outbox.html'):
    """
    Displays a list of sent messages by the current user.
    Optional arguments:
        ``template_name``: name of the template to use.
    """
    sent = Message.objects.outbox_for(request.user)
    context = {'message_list': sent}
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
outbox = login_required(outbox)
def trash(request, template_name='django_messages/trash.html'):
    """
    Displays a list of deleted messages.
    Optional arguments:
        ``template_name``: name of the template to use
    Hint: A Cron-Job could periodically clean up old messages, which are
    deleted by sender and recipient.
    """
    deleted = Message.objects.trash_for(request.user)
    context = {'message_list': deleted}
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
trash = login_required(trash)
def compose(request, recipient=None, form_class=ComposeForm,
        template_name='django_messages/compose.html', success_url=None, recipient_filter=None):
    """
    Displays and handles the ``form_class`` form to compose new messages.
    Required Arguments: None
    Optional Arguments:
        ``recipient``: username of a `django.contrib.auth` User, who should
                       receive the message, optionally multiple usernames
                       could be separated by a '+'
        ``form_class``: the form-class to use
        ``template_name``: the template to use
        ``success_url``: where to redirect after successful submission
    """
    if request.method == "POST":
        form = form_class(request.POST, recipient_filter=recipient_filter)
        if form.is_valid():
            form.save(sender=request.user)
            messages.add_message(request, messages.SUCCESS, _(u"Message successfully sent."))
            if success_url is None:
                success_url = reverse('messages_inbox')
            # ?next=/foo/ overrides success_url. Uses `in` instead of
            # QueryDict.has_key(), which was removed in Python 3.
            if 'next' in request.GET:
                success_url = request.GET['next']
            return HttpResponseRedirect(success_url)
    else:
        form = form_class()
        if recipient is not None:
            # pre-fill the recipient field from '+'-separated usernames
            recipients = [u.username for u in get_user_model().objects.filter(username__in=[r.strip() for r in recipient.split('+')])]
            form.fields['recipient'].initial = ','.join(recipients)
    return render_to_response(template_name, {
        'form': form,
    }, context_instance=RequestContext(request))
compose = login_required(compose)
def reply(request, message_id, form_class=ComposeForm,
        template_name='django_messages/compose.html', success_url=None, recipient_filter=None,
        quote=format_quote):
    """
    Prepares the ``form_class`` form for writing a reply to a given message
    (specified via ``message_id``). It uses the ``format_quote`` helper from
    ``messages.utils`` (there is also format_linebreaks_quote defined) to pre-format
    the quote by default but you can use a different formatter.

    Only the sender or the recipient of the parent message may reply;
    anyone else gets a 404.
    """
    parent = get_object_or_404(Message, id=message_id)

    if parent.sender != request.user and parent.recipient != request.user:
        raise Http404

    if request.method == "POST":
        form = form_class(request.POST, recipient_filter=recipient_filter)
        if form.is_valid():
            form.save(sender=request.user, parent_msg=parent)
            messages.add_message(request, messages.SUCCESS, _(u"Message successfully sent."))
            if success_url is None:
                success_url = reverse('messages_inbox')
            return HttpResponseRedirect(success_url)
    else:
        # pre-fill the reply with quoted body, "Re:" subject and the
        # original sender as recipient
        form = form_class({
            'body': quote(parent.sender, parent.body),
            'subject': _(u"Re: %(subject)s") % {'subject': parent.subject},
            'recipient': [parent.sender,]
            })
    return render_to_response(template_name, {
        'form': form,
    }, context_instance=RequestContext(request))
reply = login_required(reply)
def delete(request, message_id, success_url=None):
    """
    Marks a message as deleted by sender or recipient. The message is not
    really removed from the database, because two users must delete a message
    before it's safe to remove it completely.
    A cron-job should prune the database and remove old messages which are
    deleted by both users.
    As a side effect, this makes it easy to implement a trash with undelete.

    You can pass ?next=/foo/bar/ via the url to redirect the user to a different
    page (e.g. `/foo/bar/`) than ``success_url`` after deletion of the message.
    """
    user = request.user
    now = datetime.datetime.now()
    message = get_object_or_404(Message, id=message_id)
    deleted = False
    if success_url is None:
        success_url = reverse('messages_inbox')
    # `in` instead of QueryDict.has_key() — has_key was removed in Python 3
    # and `in` behaves identically on Python 2.
    if 'next' in request.GET:
        success_url = request.GET['next']
    if message.sender == user:
        message.sender_deleted_at = now
        deleted = True
    if message.recipient == user:
        message.recipient_deleted_at = now
        deleted = True
    if deleted:
        message.save()
        messages.add_message(request, messages.SUCCESS, _(u"Message successfully deleted."))
        return HttpResponseRedirect(success_url)
    raise Http404
delete = login_required(delete)
def undelete(request, message_id, success_url=None):
    """
    Recovers a message from trash. This is achieved by removing the
    ``(sender|recipient)_deleted_at`` from the model.

    Supports the same ?next= redirect override as ``delete``.
    """
    user = request.user
    message = get_object_or_404(Message, id=message_id)
    undeleted = False
    if success_url is None:
        success_url = reverse('messages_inbox')
    # `in` instead of QueryDict.has_key() — has_key was removed in Python 3
    # and `in` behaves identically on Python 2.
    if 'next' in request.GET:
        success_url = request.GET['next']
    if message.sender == user:
        message.sender_deleted_at = None
        undeleted = True
    if message.recipient == user:
        message.recipient_deleted_at = None
        undeleted = True
    if undeleted:
        message.save()
        messages.add_message(request, messages.SUCCESS, _(u"Message successfully recovered."))
        return HttpResponseRedirect(success_url)
    raise Http404
undelete = login_required(undelete)
def view(request, message_id, template_name='django_messages/view.html'):
    """
    Shows a single message. ``message_id`` argument is required.
    The user is only allowed to see the message, if he is either
    the sender or the recipient. If the user is not allowed a 404
    is raised.
    If the user is the recipient and the message is unread
    ``read_at`` is set to the current datetime.
    """
    user = request.user
    now = datetime.datetime.now()
    message = get_object_or_404(Message, id=message_id)
    # only the two parties of the conversation may see the message
    if user not in (message.sender, message.recipient):
        raise Http404
    # mark as read on first view by the recipient
    if message.recipient == user and message.read_at is None:
        message.read_at = now
        message.save()
    context = {'message': message}
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
view = login_required(view)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spin-Weighted Spherical CNN layers.
This implements the layers used in Esteves et al, "Spin-Weighted Spherical
CNNs", NeurIPS'20 [1].
Since the spin-weighted spherical convolution (SWSConv) is defined between sets
of spin-weighted spherical functions (SWSFs) of different spin weights, and in a
CNN we have a batch dimension and multiple channels per layer, in this module we
make use of 5D arrays stacking the mini-batch, spins and channels. For the
spatial equiangular representation the dimensions are (batch, lat, long, spin,
channel), and for spectral coefficients, the dimensions are (batch, ell, m,
spin, channel).
"""
import functools
from typing import Any, Callable, Optional, Sequence, Union
from flax import linen as nn
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
from spin_spherical_cnns import sphere_utils
from spin_spherical_cnns import spin_spherical_harmonics
Array = Union[np.ndarray, jnp.ndarray]
Initializer = Callable[[Any, Sequence[int], Any],
Array]
def _swsconv_spatial_spectral(transformer, sphere_set, filter_coefficients,
                              spins_in, spins_out):
  r"""Spin-weighted spherical convolution; spatial input and spectral filters.

  Multi-channel version of Eq. (13) in [1]: `sphere_set` plays the role of F
  and `filter_coefficients` of \hat{K}. For convenience the inputs and outputs
  live in the spatial domain while the filter is given by its Fourier
  coefficients, since those are what is learned in [1]. Channels mix as in a
  standard CNN: n_in * n_out filters, each output channel summing over the
  n_in filtered inputs.

  Args:
    transformer: SpinSphericalFourierTransformer instance.
    sphere_set: A (resolution, resolution, n_spins_in, n_channels_in) array of
      spin-weighted spherical functions with equiangular sampling.
    filter_coefficients: (resolution // 2, n_spins_in, n_spins_out,
      n_channels_in, n_channels_out) array of filter SWSH coefficients.
    spins_in: (n_spins_in,) Sequence of int containing the input spins.
    spins_out: (n_spins_out,) Sequence of int containing the output spins.

  Returns:
    A (resolution, resolution, n_spins_out, n_channels_out) array of
    spin-weighted spherical functions with equiangular sampling.
  """
  # Forward transform: spatial -> spectral, per input spin and channel.
  spectral_in = transformer.swsft_forward_spins_channels(sphere_set, spins_in)
  # The convolution is a per-degree (ell) contraction in the spectral domain;
  # indices are (ell, m, spin_in -> spin_out, channel_in -> channel_out).
  spectral_out = jnp.einsum("lmic,liocd->lmod",
                            spectral_in,
                            filter_coefficients)
  # Inverse transform: spectral -> spatial, per output spin and channel.
  return transformer.swsft_backward_spins_channels(spectral_out, spins_out)
# Custom filter-spectrum initializer, based on He et al, "Delving Deep into
# Rectifiers" (fan-in variance scaling, scale=2.0), but emitting complex64
# values since the learned SWSH filter coefficients are complex.
default_initializer = nn.initializers.variance_scaling(scale=2.0,
                                                       mode="fan_in",
                                                       distribution="normal",
                                                       dtype=jnp.complex64)
class SpinSphericalConvolution(nn.Module):
  """Spin-weighted spherical convolutional layer.

  Wraps _swsconv_spatial_spectral(), initializing and keeping track of the
  learnable filter.

  Attributes:
    features: int, number of output features (channels).
    spins_in: (n_spins_in,) Sequence of int containing the input spins.
    spins_out: (n_spins_out,) Sequence of int containing the output spins.
    transformer: SpinSphericalFourierTransformer instance.
    num_filter_params: Number of parameters per filter. Fewer parameters
      results in more localized filters. When None, filters have one weight
      per degree ell (ell_max + 1 in total).
    initializer: initializer for the filter spectrum.
  """
  features: int
  spins_in: Sequence[int]
  spins_out: Sequence[int]
  transformer: spin_spherical_harmonics.SpinSphericalFourierTransformer
  num_filter_params: Optional[int] = None
  initializer: Initializer = default_initializer

  def _get_kernel(self, ell_max, num_channels_in):
    """Returns the full filter bank: one learnable weight per degree ell."""
    kernel_shape = (ell_max+1, len(self.spins_in), len(self.spins_out),
                    num_channels_in, self.features)
    return self.param("kernel", self.initializer, kernel_shape)

  def _get_localized_kernel(self, ell_max, num_channels_in):
    """Returns a filter bank with `num_filter_params` weights per filter.

    The full spectrum (ell_max + 1 values per filter) is obtained by linearly
    interpolating the learnable weights along ell.
    """
    # We interpolate along ell to obtain all weights from the learnable weights,
    # hence it doesn't make sense to have more parameters than num_ell.
    if self.num_filter_params > ell_max + 1:
      raise ValueError("num_filter_params must be <= ell_max + 1")
    ell_in = jnp.linspace(0, 1, self.num_filter_params)
    ell_out = jnp.linspace(0, 1, ell_max + 1)
    # `vectorize` is over leading dimensions, so we put ell as the last
    # dimension and transpose it to the first later.
    learnable_shape = (len(self.spins_in), len(self.spins_out),
                       num_channels_in, self.features,
                       self.num_filter_params)
    learnable_weights = self.param("kernel", self.initializer, learnable_shape)
    # `jnp.interp` works on 1D inputs; we vectorize it to interpolate over a
    # single dimension of n-D inputs.
    vectorized_interp = jnp.vectorize(jnp.interp, signature="(m),(n),(n)->(m)")
    weights = vectorized_interp(ell_out, ell_in, learnable_weights)
    # Make ell the first dimension.
    return weights.transpose((4, 0, 1, 2, 3))

  @nn.compact
  def __call__(self, sphere_set):
    """Applies convolution to inputs.

    Args:
      sphere_set: A (batch_size, resolution, resolution, n_spins_in,
        n_channels_in) array of spin-weighted spherical functions (SWSF) with
        equiangular sampling.

    Returns:
      A (batch_size, resolution, resolution, n_spins_out, n_channels_out)
      complex64 array of SWSF with equiangular H&W sampling.

    Raises:
      ValueError: If input shapes are inconsistent or the transformer's
        precomputed constants do not cover all required (resolution, spin).
    """
    resolution = sphere_set.shape[1]
    if sphere_set.shape[2] != resolution:
      raise ValueError("Axes 1 and 2 must have the same dimensions!")
    if sphere_set.shape[3] != len(list(self.spins_in)):
      raise ValueError("Input axis 3 (spins_in) doesn't match layer's.")
    # Make sure constants contain all spins for input resolution.
    for spin in set(self.spins_in).union(self.spins_out):
      if not self.transformer.validate(resolution, spin):
        raise ValueError("Constants are invalid for given input!")
    ell_max = sphere_utils.ell_max_from_resolution(resolution)
    num_channels_in = sphere_set.shape[-1]
    # Localized (interpolated) kernel only when num_filter_params is given.
    if self.num_filter_params is None:
      kernel = self._get_kernel(ell_max, num_channels_in)
    else:
      kernel = self._get_localized_kernel(ell_max, num_channels_in)
    # Map over the batch dimension.
    vmap_convolution = jax.vmap(_swsconv_spatial_spectral,
                                in_axes=(None, 0, None, None, None))
    return vmap_convolution(self.transformer,
                            sphere_set, kernel,
                            self.spins_in,
                            self.spins_out)
class MagnitudeNonlinearity(nn.Module):
  """Magnitude thresholding nonlinearity, suitable for complex inputs.

  Computes z = relu(|z| + b) * (z / |z|) elementwise, where b is a learned
  bias per spin per channel.

  NOTE(machc): This operation does not preserve bandwidth and is pointwise. It
  is only approximately equivariant for the equiangular spherical
  discretization. See `layers_test.MagnitudeNonlinearityTest` for quantitative
  evaluations of the equivariance error.

  Attributes:
    epsilon: Small float constant to avoid division by zero.
    bias_initializer: initializer for the bias (default to zeroes).
  """
  epsilon: jnp.float32 = 1e-6
  bias_initializer: Initializer = nn.initializers.zeros

  @nn.compact
  def __call__(self, inputs):
    """Applies pointwise nonlinearity to 5D inputs."""
    num_spins, num_channels = inputs.shape[-2], inputs.shape[-1]
    # One bias per (spin, channel); broadcasts over batch and spatial dims.
    bias = self.param("bias", self.bias_initializer,
                      (1, 1, 1, num_spins, num_channels))
    magnitudes = jnp.abs(inputs)
    # relu thresholds the magnitude; the unit phase factor z/|z| is preserved,
    # with epsilon guarding against division by zero.
    phases = inputs / (magnitudes + self.epsilon)
    return nn.relu(magnitudes + bias) * phases
class MagnitudeNonlinearityLeakyRelu(nn.Module):
  """Applies MagnitudeNonlinearity to spin != 0 and leaky relu for spin == 0.

  The spin == 0 component does not change phase upon rotation, so any
  pointwise nonlinearity works. Here we choose the leaky relu.

  Attributes:
    spins: (n_spins,) Sequence of int containing the input spins.
    epsilon: Small float constant to avoid division by zero.
    bias_initializer: initializer for the spin != 0 bias (default to zeroes).
  """
  spins: Sequence[int]
  epsilon: jnp.float32 = 1e-6
  bias_initializer: Initializer = nn.initializers.zeros

  @nn.compact
  def __call__(self, inputs):
    """Applies pointwise nonlinearity to 5D inputs."""
    per_spin = []
    for index, spin in enumerate(self.spins):
      spin_slice = inputs[Ellipsis, [index], :]
      if spin != 0:
        nonlinearity = MagnitudeNonlinearity(
            self.epsilon, self.bias_initializer,
            name=f"magnitude_nonlin_{index}")
        per_spin.append(nonlinearity(spin_slice))
      else:
        # In [1], the spin 0 inputs are cast to real at every layer; we merge
        # that cast with the nonlinearity.
        per_spin.append(nn.leaky_relu(spin_slice.real))
    return jnp.concatenate(per_spin, axis=-2)
class SphericalPooling(nn.Module):
  """Spherical pooling layer, accounting for cell area variation.

  Executes a weighted average pooling, with weights proportional to the H&W
  quadrature scheme. However, the pooling here is a local operation, so we
  don't use the toroidal extension.

  NOTE(machc): This operation has multiple sources of equivariance errors. A
  reasonable alternative that is perfectly equivariant is to drop high
  frequencies in the spectral domain right after the convolution. We have
  experimented with this approach long ago and found it underperforming, but
  it is probably worth revisiting.

  Attributes:
    stride: int, pooling stride and window shape are (stride, stride).
  """
  stride: int

  @nn.compact
  def __call__(self, inputs):
    """Applies spherical pooling.

    Args:
      inputs: An array of dimensions (batch_size, resolution, resolution,
        n_spins_in, n_channels_in).

    Returns:
      An array of dimensions (batch_size, resolution // stride, resolution //
      stride, n_spins_in, n_channels_in).
    """
    resolution_in = inputs.shape[1]
    resolution_out = resolution_in // self.stride
    # Quadrature weights per latitude, at input and output resolutions.
    weights_in = sphere_utils.sphere_quadrature_weights(resolution_in)
    weights_out = sphere_utils.sphere_quadrature_weights(resolution_out)
    # Scale each cell by its quadrature weight before pooling.
    scaled = jnp.expand_dims(weights_in, (0, 2, 3, 4)) * inputs
    averaged = nn.avg_pool(scaled,
                           window_shape=(self.stride, self.stride, 1),
                           strides=(self.stride, self.stride, 1))
    # avg_pool computed a plain mean; multiplying by stride**2 recovers the
    # windowed sum, and dividing by the output weights yields the weighted
    # average.
    return (averaged * self.stride**2 /
            jnp.expand_dims(weights_out, (0, 2, 3, 4)))
# Complex-dtype counterparts of the standard ones/zeros initializers, used as
# defaults for the batch-normalization scale and bias parameters below.
_complex_ones_initializer = functools.partial(nn.initializers.ones,
                                              dtype=jnp.complex64)
_complex_zeros_initializer = functools.partial(nn.initializers.zeros,
                                               dtype=jnp.complex64)
class SphericalBatchNormalization(nn.Module):
  """Batch normalization for spherical functions.

  Two main changes with respect to the usual nn.BatchNorm:
  1) Subtracting a complex value is not rotation-equivariant for spin-weighted
     functions, so we add an option to not subtract the mean and only keep
     track of and divide by the variance.
  2) Mean and variance computation on the sphere must take into account the
     discretization cell areas.

  Attributes:
    use_running_stats: if True, the statistics stored in batch_stats
      will be used instead of computing the batch statistics on the input.
    momentum: decay rate for the exponential moving average of
      the batch statistics.
    centered: When False, skips mean-subtraction step.
    epsilon: a small float added to variance to avoid dividing by zero.
    use_bias: if True, add a complex-valued learned bias.
    use_scale: if True, multiply by a complex-valued learned scale.
    bias_init: initializer for bias, by default, zero.
    scale_init: initializer for scale, by default, one.
    axis_name: the axis name used to combine batch statistics from multiple
      devices. See `jax.pmap` for a description of axis names (default: None).
  """
  use_running_stats: Optional[bool] = None
  momentum: float = 0.99
  epsilon: float = 1e-5
  centered: bool = True
  use_bias: bool = True
  use_scale: bool = True
  bias_init: Initializer = _complex_zeros_initializer
  scale_init: Initializer = _complex_ones_initializer
  axis_name: Optional[str] = None

  @nn.compact
  def __call__(self,
               inputs,
               use_running_stats = None,
               weights = None):
    """Normalizes the input using batch (optional) means and variances.

    Stats are computed over the batch and spherical dimensions: (0, 1, 2).

    Args:
      inputs: An array of dimensions (batch_size, resolution, resolution,
        n_spins_in, n_channels_in).
      use_running_stats: if true, the statistics stored in batch_stats will be
        used instead of computing the batch statistics on the input.
      weights: An array of dimensions (batch_size,) assigning weights for
        each batch element. Useful for masking.

    Returns:
      Normalized inputs (the same shape as inputs).
    """
    # Call-time argument takes precedence over the attribute; exactly one of
    # the two must be set.
    use_running_stats = nn.module.merge_param(
        "use_running_stats", self.use_running_stats, use_running_stats)
    # Normalization is independent per spin per channel.
    num_spins, num_channels = inputs.shape[-2:]
    feature_shape = (1, 1, 1, num_spins, num_channels)
    reduced_feature_shape = (num_spins, num_channels)
    # True while the batch_stats collection has not been created yet; used
    # below to skip cross-device aggregation and running-average updates.
    initializing = not self.has_variable("batch_stats", "variance")
    running_variance = self.variable("batch_stats", "variance",
                                     lambda s: jnp.ones(s, jnp.float32),
                                     reduced_feature_shape)
    # The running mean is only tracked when mean subtraction is enabled.
    if self.centered:
      running_mean = self.variable("batch_stats", "mean",
                                   lambda s: jnp.zeros(s, jnp.complex64),
                                   reduced_feature_shape)
    if use_running_stats:
      variance = running_variance.value
      if self.centered:
        mean = running_mean.value
    else:
      # Compute the spherical mean over the spherical grid dimensions, then a
      # conventional mean over the batch.
      if self.centered:
        mean = sphere_utils.spin_spherical_mean(inputs)
        mean = jnp.average(mean, axis=0, weights=weights)
      # Complex variance is E[x x*] - E[x]E[x*].
      # For spin != 0, E[x] should be zero, although due to discretization this
      # is not always true. We only use E[x x*] here.
      # E[x x*]:
      mean_abs_squared = sphere_utils.spin_spherical_mean(inputs *
                                                          inputs.conj())
      mean_abs_squared = jnp.average(mean_abs_squared, axis=0, weights=weights)
      # Aggregate means over devices.
      if self.axis_name is not None and not initializing:
        if self.centered:
          mean = lax.pmean(mean, axis_name=self.axis_name)
        mean_abs_squared = lax.pmean(mean_abs_squared, axis_name=self.axis_name)
      # Imaginary part is negligible.
      variance = mean_abs_squared.real
      # Update the exponential moving averages of the batch statistics.
      if not initializing:
        running_variance.value = (self.momentum * running_variance.value +
                                  (1 - self.momentum) * variance)
        if self.centered:
          running_mean.value = (self.momentum * running_mean.value +
                                (1 - self.momentum) * mean)
    if self.centered:
      outputs = inputs - mean.reshape(feature_shape)
    else:
      outputs = inputs
    factor = lax.rsqrt(variance.reshape(feature_shape) + self.epsilon)
    if self.use_scale:
      scale = self.param("scale",
                         self.scale_init,
                         reduced_feature_shape).reshape(feature_shape)
      factor = factor * scale
    outputs = outputs * factor
    if self.use_bias:
      bias = self.param("bias",
                        self.bias_init,
                        reduced_feature_shape).reshape(feature_shape)
      outputs = outputs + bias
    return outputs
class SpinSphericalBatchNormalization(nn.Module):
  """Batch normalization for spin-spherical functions.

  This uses the default SphericalBatchNormalization for spin == 0 and the
  centered version for other spins.

  Attributes:
    spins: (n_spins,) Sequence of int containing the input spins.
    use_running_stats: if True, the statistics stored in batch_stats
      will be used instead of computing the batch statistics on the input.
    momentum: decay rate for the exponential moving average of
      the batch statistics.
    epsilon: a small float added to variance to avoid dividing by zero.
    axis_name: the axis name used to combine batch statistics from multiple
      devices. See `jax.pmap` for a description of axis names (default: None).
  """
  spins: Sequence[int]
  use_running_stats: Optional[bool] = None
  momentum: float = 0.99
  epsilon: float = 1e-5
  axis_name: Optional[str] = None

  @nn.compact
  def __call__(self,
               inputs,
               use_running_stats = None,
               weights = None):
    """Call appropriate version of SphericalBatchNormalization per spin."""
    use_running_stats = nn.module.merge_param(
        "use_running_stats", self.use_running_stats, use_running_stats)
    shared_options = dict(use_running_stats=use_running_stats,
                          momentum=self.momentum,
                          epsilon=self.epsilon,
                          axis_name=self.axis_name)
    normalized = []
    for index, spin in enumerate(self.spins):
      spin_slice = inputs[Ellipsis, [index], :]
      # Mean subtraction (and hence a bias) is only equivariant for spin 0;
      # other spins use the uncentered, bias-free variant.
      is_spin_zero = spin == 0
      batch_norm = SphericalBatchNormalization(use_bias=is_spin_zero,
                                               centered=is_spin_zero,
                                               **shared_options)
      normalized.append(batch_norm(spin_slice, weights=weights))
    return jnp.concatenate(normalized, axis=-2)
class SpinSphericalBatchNormMagnitudeNonlin(nn.Module):
  """Combine batch normalization and nonlinarity for spin-spherical functions.

  This layer is equivalent to running SpinSphericalBatchNormalization followed
  by MagnitudeNonlinearityLeakyRelu, but is faster because it splits the
  computation for spin zero and spin nonzero only once.

  Attributes:
    spins: (n_spins,) Sequence of int containing the input spins.
    use_running_stats: if True, the statistics stored in batch_stats
      will be used instead of computing the batch statistics on the input.
    momentum: decay rate for the exponential moving average of
      the batch statistics.
    epsilon: a small float added to variance to avoid dividing by zero.
    axis_name: the axis name used to combine batch statistics from multiple
      devices. See `jax.pmap` for a description of axis names (default: None).
    bias_initializer: initializer for MagnitudeNonlinearity bias, by default,
      zero.
  """
  spins: Sequence[int]
  use_running_stats: Optional[bool] = None
  momentum: float = 0.99
  epsilon: float = 1e-5
  axis_name: Optional[str] = None
  bias_initializer: Initializer = nn.initializers.zeros

  @nn.compact
  def __call__(self,
               inputs,
               use_running_stats = None,
               weights = None):
    """Calls appropriate batch normalization and nonlinearity per spin."""
    use_running_stats = nn.module.merge_param(
        "use_running_stats", self.use_running_stats, use_running_stats)
    shared_options = dict(use_running_stats=use_running_stats,
                          momentum=self.momentum,
                          epsilon=self.epsilon,
                          axis_name=self.axis_name)
    results = []
    for index, spin in enumerate(self.spins):
      spin_slice = inputs[Ellipsis, [index], :]
      # Spin 0 gets the centered, biased batch norm; other spins the
      # uncentered, bias-free variant (centering is not equivariant there).
      is_spin_zero = spin == 0
      batch_norm = SphericalBatchNormalization(use_bias=is_spin_zero,
                                               centered=is_spin_zero,
                                               **shared_options)
      normalized = batch_norm(spin_slice, weights=weights)
      if is_spin_zero:
        # Spin 0 phases are rotation-invariant; cast to real and apply a
        # conventional pointwise nonlinearity.
        activated = nn.leaky_relu(normalized.real)
      else:
        activated = MagnitudeNonlinearity(
            bias_initializer=self.bias_initializer,
            name=f"magnitude_nonlin_{index}")(normalized)
      results.append(activated)
    return jnp.concatenate(results, axis=-2)
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
from numbers import Number
import warnings
from pathlib import Path
from pymatgen.analysis.phase_diagram import *
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.core.periodic_table import Element, DummySpecie
from pymatgen.core.composition import Composition
from pymatgen.entries.entry_tools import EntrySet
module_dir = Path(__file__).absolute().parent
class PDEntryTest(unittest.TestCase):
    """Tests PDEntry and GrandPotPDEntry using a fictitious LiFeO2 entry."""

    def setUp(self):
        # Grand-potential entry is open with respect to O at mu_O = 1.5.
        self.entry = PDEntry(Composition("LiFeO2"), 53)
        self.gpentry = GrandPotPDEntry(self.entry, {Element('O'): 1.5})

    def test_get_energy(self):
        self.assertEqual(self.entry.energy, 53, "Wrong energy!")
        self.assertEqual(self.gpentry.energy, 50, "Wrong energy!")

    def test_get_energy_per_atom(self):
        self.assertEqual(self.entry.energy_per_atom, 53.0 / 4,
                         "Wrong energy per atom!")
        self.assertEqual(self.gpentry.energy_per_atom, 50.0 / 2,
                         "Wrong energy per atom!")

    def test_get_name(self):
        self.assertEqual(self.entry.name, 'LiFeO2', "Wrong name!")
        self.assertEqual(self.gpentry.name, 'LiFeO2', "Wrong name!")

    def test_get_composition(self):
        self.assertEqual(self.entry.composition, Composition('LiFeO2'),
                         "Wrong composition!")
        # The open element (O) is projected out of the grand-pot composition.
        self.assertEqual(self.gpentry.composition, Composition("LiFe"),
                         "Wrong composition!")

    def test_is_element(self):
        self.assertFalse(self.entry.is_element)
        self.assertFalse(self.gpentry.is_element)

    def test_to_from_dict(self):
        d = self.entry.as_dict()
        entry = PDEntry.from_dict(d)
        self.assertEqual(entry.name, 'LiFeO2', "Wrong name!")
        self.assertEqual(entry.energy_per_atom, 53.0 / 4)
        gpentry = GrandPotPDEntry.from_dict(self.gpentry.as_dict())
        self.assertEqual(gpentry.name, 'LiFeO2', "Wrong name!")
        self.assertEqual(gpentry.energy_per_atom, 50.0 / 2)
        # The 'name' key should be optional in the serialized form.
        nameless = dict(d)
        del nameless['name']
        try:
            PDEntry.from_dict(nameless)
        except KeyError:
            self.fail("Should not need to supply name!")

    def test_str(self):
        self.assertIsNotNone(str(self.entry))

    def test_read_csv(self):
        entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
        self.assertEqual(entries.chemsys, {'Li', 'Fe', 'O'},
                         "Wrong elements!")
        self.assertEqual(len(entries), 492, "Wrong number of entries!")
class TransformedPDEntryTest(unittest.TestCase):
    """Tests TransformedPDEntry using a fictitious LiFeO2 entry."""

    def setUp(self):
        original = PDEntry(Composition("LiFeO2"), 53)
        # Map the entry onto a two-component dummy-species composition.
        dummy_composition = {DummySpecie('Xa'): 1, DummySpecie("Xb"): 1}
        self.transformed_entry = TransformedPDEntry(dummy_composition,
                                                    original)

    def test_get_energy(self):
        self.assertEqual(self.transformed_entry.energy, 53, "Wrong energy!")
        self.assertEqual(self.transformed_entry.original_entry.energy, 53.0)

    def test_get_energy_per_atom(self):
        self.assertEqual(self.transformed_entry.energy_per_atom, 53.0 / 2)

    def test_get_name(self):
        self.assertEqual(self.transformed_entry.name, 'LiFeO2', "Wrong name!")

    def test_get_composition(self):
        expected = Composition({DummySpecie('Xa'): 1, DummySpecie('Xb'): 1})
        self.assertEqual(self.transformed_entry.composition, expected,
                         "Wrong composition!")

    def test_is_element(self):
        self.assertFalse(self.transformed_entry.is_element)

    def test_to_from_dict(self):
        d = self.transformed_entry.as_dict()
        entry = TransformedPDEntry.from_dict(d)
        self.assertEqual(entry.name, 'LiFeO2', "Wrong name!")
        self.assertEqual(entry.energy_per_atom, 53.0 / 2)

    def test_str(self):
        self.assertIsNotNone(str(self.transformed_entry))
class PhaseDiagramTest(unittest.TestCase):
    """Tests PhaseDiagram built from the Li-Fe-O entries in pdentries_test.csv."""

    def setUp(self):
        self.entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
        self.pd = PhaseDiagram(self.entries)
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_init(self):
        # Ensure that a bad set of entries raises a PD error. Remove all Li
        # from self.entries.
        entries = filter(lambda e: (not e.composition.is_element) or
                         e.composition.elements[0] != Element("Li"),
                         self.entries)
        self.assertRaises(PhaseDiagramError, PhaseDiagram, entries)

    def test_dim1(self):
        # Ensure that dim 1 PDs can be generated.
        for el in ["Li", "Fe", "O2"]:
            entries = [e for e in self.entries
                       if e.composition.reduced_formula == el]
            pd = PhaseDiagram(entries)
            self.assertEqual(len(pd.stable_entries), 1)
            for e in entries:
                decomp, ehull = pd.get_decomp_and_e_above_hull(e)
                self.assertGreaterEqual(ehull, 0)
            plotter = PDPlotter(pd)
            lines, stable_entries, unstable_entries = plotter.pd_plot_data
            self.assertEqual(lines[0][1], [0, 0])

    def test_stable_entries(self):
        stable_formulas = [ent.composition.reduced_formula
                           for ent in self.pd.stable_entries]
        expected_stable = ["Fe2O3", "Li5FeO4", "LiFeO2", "Fe3O4", "Li", "Fe",
                           "Li2O", "O2", "FeO"]
        for formula in expected_stable:
            self.assertTrue(formula in stable_formulas,
                            formula + " not in stable entries!")

    def test_get_formation_energy(self):
        stable_formation_energies = {ent.composition.reduced_formula:
                                     self.pd.get_form_energy(ent)
                                     for ent in self.pd.stable_entries}
        expected_formation_energies = {'Li5FeO4': -164.8117344866667,
                                       'Li2O2': -14.119232793333332,
                                       'Fe2O3': -16.574164339999996,
                                       'FeO': -5.7141519966666685, 'Li': 0.0,
                                       'LiFeO2': -7.732752316666666,
                                       'Li2O': -6.229303868333332,
                                       'Fe': 0.0, 'Fe3O4': -22.565714456666683,
                                       'Li2FeO3': -45.67166036000002,
                                       'O2': 0.0}
        for formula, energy in expected_formation_energies.items():
            self.assertAlmostEqual(
                energy, stable_formation_energies[formula], 7)

    def test_all_entries_hulldata(self):
        self.assertEqual(len(self.pd.all_entries_hulldata), 492)

    def test_planar_inputs(self):
        # Degenerate (all-zero-energy elemental) input should produce a
        # single facet rather than fail the hull construction.
        e1 = PDEntry('H', 0)
        e2 = PDEntry('He', 0)
        e3 = PDEntry('Li', 0)
        e4 = PDEntry('Be', 0)
        e5 = PDEntry('B', 0)
        e6 = PDEntry('Rb', 0)
        pd = PhaseDiagram([e1, e2, e3, e4, e5, e6],
                          map(Element, ['Rb', 'He', 'B', 'Be', 'Li', 'H']))
        self.assertEqual(len(pd.facets), 1)

    def test_str(self):
        self.assertIsNotNone(str(self.pd))

    def test_get_e_above_hull(self):
        for entry in self.pd.stable_entries:
            self.assertLess(self.pd.get_e_above_hull(entry), 1e-11,
                            "Stable entries should have e above hull of zero!")
        for entry in self.pd.all_entries:
            if entry not in self.pd.stable_entries:
                e_ah = self.pd.get_e_above_hull(entry)
                self.assertGreaterEqual(e_ah, 0)
                self.assertTrue(isinstance(e_ah, Number))

    def test_get_equilibrium_reaction_energy(self):
        for entry in self.pd.stable_entries:
            self.assertLessEqual(
                self.pd.get_equilibrium_reaction_energy(entry), 0,
                "Stable entries should have negative equilibrium reaction energy!")

    def test_get_decomposition(self):
        for entry in self.pd.stable_entries:
            self.assertEqual(len(self.pd.get_decomposition(entry.composition)), 1,
                             "Stable composition should have only 1 decomposition!")
        dim = len(self.pd.elements)
        for entry in self.pd.all_entries:
            ndecomp = len(self.pd.get_decomposition(entry.composition))
            self.assertTrue(ndecomp > 0 and ndecomp <= dim,
                            "The number of decomposition phases can at most be equal to the number of components.")
        # Just to test decomp for a fictitious composition
        ansdict = {entry.composition.formula: amt
                   for entry, amt in
                   self.pd.get_decomposition(Composition("Li3Fe7O11")).items()}
        expected_ans = {"Fe2 O2": 0.0952380952380949,
                        "Li1 Fe1 O2": 0.5714285714285714,
                        "Fe6 O8": 0.33333333333333393}
        for k, v in expected_ans.items():
            self.assertAlmostEqual(ansdict[k], v)

    def test_get_transition_chempots(self):
        for el in self.pd.elements:
            self.assertLessEqual(len(self.pd.get_transition_chempots(el)),
                                 len(self.pd.facets))

    def test_get_element_profile(self):
        for el in self.pd.elements:
            for entry in self.pd.stable_entries:
                if not (entry.composition.is_element):
                    self.assertLessEqual(len(self.pd.get_element_profile(el, entry.composition)),
                                         len(self.pd.facets))
        # Spot-check the full profile of Li2O open to oxygen.
        expected = [{'evolution': 1.0,
                     'chempot': -4.2582781416666666,
                     'reaction': 'Li2O + 0.5 O2 -> Li2O2'},
                    {'evolution': 0,
                     'chempot': -5.0885906699999968,
                     'reaction': 'Li2O -> Li2O'},
                    {'evolution': -1.0,
                     'chempot': -10.487582010000001,
                     'reaction': 'Li2O -> 2 Li + 0.5 O2'}]
        result = self.pd.get_element_profile(Element('O'), Composition('Li2O'))
        for d1, d2 in zip(expected, result):
            self.assertAlmostEqual(d1['evolution'], d2['evolution'])
            self.assertAlmostEqual(d1['chempot'], d2['chempot'])
            self.assertEqual(d1['reaction'], str(d2['reaction']))

    def test_get_get_chempot_range_map(self):
        elements = [el for el in self.pd.elements if el.symbol != "Fe"]
        self.assertEqual(len(self.pd.get_chempot_range_map(elements)), 10)

    def test_getmu_vertices_stability_phase(self):
        results = self.pd.getmu_vertices_stability_phase(Composition("LiFeO2"), Element("O"))
        self.assertAlmostEqual(len(results), 6)
        # At least one vertex must match the known chemical potentials.
        test_equality = False
        for c in results:
            if abs(c[Element("O")] + 7.115) < 1e-2 and abs(c[Element("Fe")] + 6.596) < 1e-2 and \
                    abs(c[Element("Li")] + 3.931) < 1e-2:
                test_equality = True
        self.assertTrue(test_equality, "there is an expected vertex missing in the list")

    def test_getmu_range_stability_phase(self):
        results = self.pd.get_chempot_range_stability_phase(
            Composition("LiFeO2"), Element("O"))
        self.assertAlmostEqual(results[Element("O")][1], -4.4501812249999997)
        self.assertAlmostEqual(results[Element("Fe")][0], -6.5961470999999996)
        self.assertAlmostEqual(results[Element("Li")][0], -3.6250022625000007)

    def test_get_hull_energy(self):
        for entry in self.pd.stable_entries:
            h_e = self.pd.get_hull_energy(entry.composition)
            self.assertAlmostEqual(h_e, entry.energy)
            n_h_e = self.pd.get_hull_energy(entry.composition.fractional_composition)
            self.assertAlmostEqual(n_h_e, entry.energy_per_atom)

    def test_1d_pd(self):
        entry = PDEntry('H', 0)
        pd = PhaseDiagram([entry])
        decomp, e = pd.get_decomp_and_e_above_hull(PDEntry('H', 1))
        self.assertAlmostEqual(e, 1)
        self.assertAlmostEqual(decomp[entry], 1.0)

    def test_get_critical_compositions_fractional(self):
        c1 = Composition('Fe2O3').fractional_composition
        c2 = Composition('Li3FeO4').fractional_composition
        c3 = Composition('Li2O').fractional_composition
        comps = self.pd.get_critical_compositions(c1, c2)
        expected = [Composition('Fe2O3').fractional_composition,
                    Composition('Li0.3243244Fe0.1621621O0.51351349'),
                    Composition('Li3FeO4').fractional_composition]
        for crit, exp in zip(comps, expected):
            self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
        comps = self.pd.get_critical_compositions(c1, c3)
        expected = [Composition('Fe0.4O0.6'),
                    Composition('LiFeO2').fractional_composition,
                    Composition('Li5FeO4').fractional_composition,
                    Composition('Li2O').fractional_composition]
        for crit, exp in zip(comps, expected):
            self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))

    def test_get_critical_compositions(self):
        c1 = Composition('Fe2O3')
        c2 = Composition('Li3FeO4')
        c3 = Composition('Li2O')
        comps = self.pd.get_critical_compositions(c1, c2)
        expected = [Composition('Fe2O3'),
                    Composition('Li0.3243244Fe0.1621621O0.51351349') * 7.4,
                    Composition('Li3FeO4')]
        for crit, exp in zip(comps, expected):
            self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
        comps = self.pd.get_critical_compositions(c1, c3)
        expected = [Composition('Fe2O3'),
                    Composition('LiFeO2'),
                    Composition('Li5FeO4') / 3,
                    Composition('Li2O')]
        for crit, exp in zip(comps, expected):
            self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
        # Don't fail silently if input compositions aren't in phase diagram
        # Can be very confusing if you're working with a GrandPotentialPD
        self.assertRaises(ValueError, self.pd.get_critical_compositions,
                          Composition('Xe'), Composition('Mn'))
        # For the moment, should also fail even if compositions are in the gppd
        # because it isn't handled properly
        gppd = GrandPotentialPhaseDiagram(self.pd.all_entries, {'Xe': 1},
                                          self.pd.elements + [Element('Xe')])
        self.assertRaises(ValueError, gppd.get_critical_compositions,
                          Composition('Fe2O3'), Composition('Li3FeO4Xe'))
        # check that the function still works though
        comps = gppd.get_critical_compositions(c1, c2)
        expected = [Composition('Fe2O3'),
                    Composition('Li0.3243244Fe0.1621621O0.51351349') * 7.4,
                    Composition('Li3FeO4')]
        for crit, exp in zip(comps, expected):
            self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
        # case where the endpoints are identical
        self.assertEqual(self.pd.get_critical_compositions(c1, c1 * 2),
                         [c1, c1 * 2])

    def test_get_composition_chempots(self):
        # Chemical potentials from the hull simplex should reproduce the hull
        # energy difference between two nearby compositions.
        c1 = Composition('Fe3.1O4')
        c2 = Composition('Fe3.2O4.1Li0.01')
        e1 = self.pd.get_hull_energy(c1)
        e2 = self.pd.get_hull_energy(c2)
        cp = self.pd.get_composition_chempots(c1)
        calc_e2 = e1 + sum(cp[k] * v for k, v in (c2 - c1).items())
        self.assertAlmostEqual(e2, calc_e2)

    def test_get_all_chempots(self):
        c1 = Composition('Fe3.1O4')
        c2 = Composition('FeO')
        cp1 = self.pd.get_all_chempots(c1)
        cpresult = {Element("Li"): -4.077061954999998,
                    Element("Fe"): -6.741593864999999,
                    Element("O"): -6.969907375000003}
        for elem, energy in cpresult.items():
            self.assertAlmostEqual(cp1['FeO-LiFeO2-Fe3O4'][elem],energy)
        cp2 = self.pd.get_all_chempots(c2)
        cpresult = {Element("O"): -7.115354140000001,
                    Element("Fe"): -6.5961471,
                    Element("Li"): -3.9316151899999987}
        for elem, energy in cpresult.items():
            self.assertAlmostEqual(cp2['FeO-LiFeO2-Fe'][elem],energy)

    def test_to_from_dict(self):
        # test round-trip for other entry types such as ComputedEntry
        entry = ComputedEntry('H', 0.0, 0.0, entry_id="test")
        pd = PhaseDiagram([entry])
        d = pd.as_dict()
        pd_roundtrip = PhaseDiagram.from_dict(d)
        self.assertEqual(pd.all_entries[0].entry_id,
                         pd_roundtrip.all_entries[0].entry_id)
class GrandPotentialPhaseDiagramTest(unittest.TestCase):
    """Tests grand-potential phase diagrams open to oxygen."""

    def setUp(self):
        self.entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
        # Diagrams open to O at two different oxygen chemical potentials.
        self.pd = GrandPotentialPhaseDiagram(self.entries, {Element("O"): -5})
        self.pd6 = GrandPotentialPhaseDiagram(self.entries, {Element("O"): -6})

    def test_stable_entries(self):
        stable_formulas = [entry.original_entry.composition.reduced_formula
                           for entry in self.pd.stable_entries]
        for formula in ['Li5FeO4', 'Li2FeO3', 'LiFeO2', 'Fe2O3', 'Li2O2']:
            self.assertTrue(formula in stable_formulas,
                            formula + " not in stable entries!")
        self.assertEqual(len(self.pd6.stable_entries), 4)

    def test_get_formation_energy(self):
        computed = {entry.original_entry.composition.reduced_formula:
                    self.pd.get_form_energy(entry)
                    for entry in self.pd.stable_entries}
        expected = {'Fe2O3': 0.0,
                    'Li5FeO4': -5.305515040000046,
                    'Li2FeO3': -2.3424741500000152,
                    'LiFeO2': -0.43026396250000154,
                    'Li2O2': 0.0}
        for formula, energy in expected.items():
            self.assertAlmostEqual(energy, computed[formula],
                                   7, "Calculated formation for " +
                                   formula + " is not correct!")

    def test_str(self):
        self.assertIsNotNone(str(self.pd))
class CompoundPhaseDiagramTest(unittest.TestCase):
    """Tests phase diagrams using compound (Li2O, Fe2O3) terminal compositions."""

    def setUp(self):
        self.entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
        terminals = [Composition("Li2O"), Composition("Fe2O3")]
        self.pd = CompoundPhaseDiagram(self.entries, terminals)

    def test_stable_entries(self):
        stable_names = [entry.name for entry in self.pd.stable_entries]
        for formula in ["Fe2O3", "Li5FeO4", "LiFeO2", "Li2O"]:
            self.assertTrue(formula in stable_names)

    def test_get_formation_energy(self):
        computed = {entry.name: self.pd.get_form_energy(entry)
                    for entry in self.pd.stable_entries}
        expected = {'Li5FeO4': -7.0773284399999739,
                    'Fe2O3': 0,
                    'LiFeO2': -0.47455929750000081,
                    'Li2O': 0}
        for formula, energy in expected.items():
            self.assertAlmostEqual(energy, computed[formula], 7)

    def test_str(self):
        self.assertIsNotNone(str(self.pd))
class ReactionDiagramTest(unittest.TestCase):
    """Builds a ReactionDiagram between VPO5 and H4(CO)3 from CSV entries."""

    def setUp(self):
        module_dir = os.path.dirname(os.path.abspath(__file__))
        self.entries = list(EntrySet.from_csv(
            os.path.join(module_dir, "reaction_entries_test.csv")).entries)
        # Pick the reaction endpoints by formula (last match wins, as before).
        endpoints = {}
        for entry in self.entries:
            formula = entry.composition.reduced_formula
            if formula in ("VPO5", "H4(CO)3"):
                endpoints[formula] = entry
        self.rd = ReactionDiagram(entry1=endpoints["VPO5"],
                                  entry2=endpoints["H4(CO)3"],
                                  all_entries=self.entries[2:])

    def test_get_compound_pd(self):
        # Smoke test: must not raise.
        self.rd.get_compound_pd()

    def test_formed_formula(self):
        formed_formula = [e.composition.reduced_formula
                          for e in self.rd.rxn_entries]
        expected_formula = [
            'V0.12707182P0.12707182H0.0441989C0.03314917O0.66850829',
            'V0.125P0.125H0.05C0.0375O0.6625',
            'V0.12230216P0.12230216H0.05755396C0.04316547O0.65467626',
            'V0.11340206P0.11340206H0.08247423C0.06185567O0.62886598',
            'V0.11267606P0.11267606H0.08450704C0.06338028O0.62676056',
            'V0.11229947P0.11229947H0.0855615C0.06417112O0.62566845',
            'V0.09677419P0.09677419H0.12903226C0.09677419O0.58064516',
            'V0.05882353P0.05882353H0.23529412C0.17647059O0.47058824',
            'V0.04225352P0.04225352H0.28169014C0.21126761O0.42253521']
        for formula in expected_formula:
            self.assertTrue(formula in formed_formula)
class PDPlotterTest(unittest.TestCase):
    """Smoke and shape tests for PDPlotter on binary and ternary diagrams."""

    def setUp(self):
        entries = list(EntrySet.from_csv(os.path.join(module_dir,
                                                      "pdentries_test.csv")))
        self.pd = PhaseDiagram(entries)
        self.plotter = PDPlotter(self.pd, show_unstable=True)

        # Li-O subsystem: drop every Fe-containing entry.
        entrieslio = [e for e in entries if "Fe" not in e.composition]
        self.pd_formation = PhaseDiagram(entrieslio)
        self.plotter_formation = PDPlotter(self.pd_formation, show_unstable=0.1)

        # Add a third element to obtain a ternary (3-D) diagram.
        entries.append(PDEntry("C", 0))
        self.pd3d = PhaseDiagram(entries)
        self.plotter3d = PDPlotter(self.pd3d, show_unstable=0.1)

    def test_pd_plot_data(self):
        lines, labels, unstable_entries = self.plotter.pd_plot_data
        self.assertEqual(len(lines), 22)
        self.assertEqual(len(labels), len(self.pd.stable_entries),
                         "Incorrect number of lines generated!")
        self.assertEqual(len(unstable_entries),
                         len(self.pd.all_entries) - len(self.pd.stable_entries),
                         "Incorrect number of lines generated!")

        lines, labels, unstable_entries = self.plotter3d.pd_plot_data
        self.assertEqual(len(lines), 33)
        self.assertEqual(len(labels), len(self.pd3d.stable_entries))
        self.assertEqual(len(unstable_entries),
                         len(self.pd3d.all_entries) - len(self.pd3d.stable_entries))

        lines, labels, unstable_entries = self.plotter_formation.pd_plot_data
        self.assertEqual(len(lines), 3)
        self.assertEqual(len(labels), len(self.pd_formation.stable_entries))

    def test_get_plot(self):
        # Very basic smoke tests: just make sure the methods are callable.
        self.plotter.get_plot().close()
        self.plotter3d.get_plot().close()
        self.plotter.get_contour_pd_plot().close()
        self.plotter.get_chempot_range_map_plot(
            [Element("Li"), Element("O")]).close()
        self.plotter.plot_element_profile(
            Element("O"), Composition("Li2O")).close()
class UtilityFunctionTest(unittest.TestCase):
    """Tests for the module-level geometry helpers."""

    def test_unique_lines(self):
        testdata = [[5, 53, 353], [399, 20, 52], [399, 400, 20], [13, 399, 52],
                    [21, 400, 353], [393, 5, 353], [400, 393, 353],
                    [393, 400, 399], [393, 13, 5], [13, 393, 399],
                    [400, 17, 20], [21, 17, 400]]
        expected_ans = {(5, 393), (21, 353), (353, 400), (5, 13), (17, 20),
                        (21, 400), (17, 400), (52, 399), (393, 399),
                        (20, 52), (353, 393), (5, 353), (5, 53), (13, 399),
                        (393, 400), (13, 52), (53, 353), (17, 21),
                        (13, 393), (20, 399), (399, 400), (20, 400)}
        self.assertEqual(uniquelines(testdata), expected_ans)

    def test_triangular_coord(self):
        # Midpoint of the binary edge maps into the triangle interior.
        result = triangular_coord([0.5, 0.5])
        self.assertTrue(np.allclose(result, [0.75, 0.4330127]))

    def test_tet_coord(self):
        result = tet_coord([0.5, 0.5, 0.5])
        self.assertTrue(np.allclose(result, [1., 0.57735027, 0.40824829]))
# Allow running this test module directly with ``python <file>``.
if __name__ == '__main__':
    unittest.main()
|
|
from __future__ import absolute_import
from tempfile import NamedTemporaryFile
from django.conf import settings
from django.http import HttpResponse
from django.template.response import TemplateResponse
from django.views.generic import TemplateView
from django.utils.encoding import smart_text
from .utils import (content_disposition_filename, make_absolute_paths,
wkhtmltopdf)
class PDFResponse(HttpResponse):
    """HttpResponse that sets the headers for PDF output."""

    def __init__(self, content, status=200, content_type=None,
                 filename=None, show_content_in_browser=None, *args, **kwargs):
        # Default to the PDF MIME type unless the caller overrides it.
        if content_type is None:
            content_type = 'application/pdf'
        super(PDFResponse, self).__init__(content=content, status=status,
                                          content_type=content_type)
        self.set_filename(filename, show_content_in_browser)

    def set_filename(self, filename, show_content_in_browser):
        """Set Content-Disposition from *filename*; drop the header if empty."""
        self.filename = filename
        if not filename:
            del self['Content-Disposition']
            return
        # 'inline' renders in the browser; 'attachment' forces a download.
        if show_content_in_browser:
            template = 'inline; filename={0}'
        else:
            template = 'attachment; filename={0}'
        self['Content-Disposition'] = template.format(
            content_disposition_filename(filename))
class PDFTemplateResponse(TemplateResponse, PDFResponse):
    """Renders a Template into a PDF using wkhtmltopdf"""

    def __init__(self, request, template, context=None,
                 status=None, content_type=None, current_app=None,
                 filename=None, show_content_in_browser=None,
                 header_template=None, footer_template=None,
                 cmd_options=None, *args, **kwargs):
        """
        :param filename: download filename (see PDFResponse.set_filename).
        :param show_content_in_browser: render inline instead of attachment.
        :param header_template: optional template rendered to a temporary
            HTML file and passed to wkhtmltopdf as the page header.
        :param footer_template: same, for the page footer.
        :param cmd_options: dict of extra wkhtmltopdf command-line options.
        """
        # Bug fix: current_app was previously hard-coded to None in this
        # call, silently discarding the caller's value.
        super(PDFTemplateResponse, self).__init__(request=request,
                                                  template=template,
                                                  context=context,
                                                  status=status,
                                                  content_type=content_type,
                                                  current_app=current_app,
                                                  *args, **kwargs)
        self.set_filename(filename, show_content_in_browser)
        self.header_template = header_template
        self.footer_template = footer_template
        if cmd_options is None:
            cmd_options = {}
        self.cmd_options = cmd_options

    def render_to_temporary_file(self, template_name, mode='w+b', bufsize=-1,
                                 suffix='.html', prefix='tmp', dir=None,
                                 delete=True):
        """Render *template_name* with this response's context into a
        NamedTemporaryFile and return the open, flushed file object.

        The caller owns the returned file and is responsible for closing it.
        """
        template = self.resolve_template(template_name)
        context = self.resolve_context(self.context_data)
        content = smart_text(template.render(context))
        # Rewrite relative URLs so wkhtmltopdf can resolve static assets.
        content = make_absolute_paths(content)

        try:
            # Python3 has 'buffering' arg instead of 'bufsize'
            tempfile = NamedTemporaryFile(mode=mode, buffering=bufsize,
                                          suffix=suffix, prefix=prefix,
                                          dir=dir, delete=delete)
        except TypeError:
            tempfile = NamedTemporaryFile(mode=mode, bufsize=bufsize,
                                          suffix=suffix, prefix=prefix,
                                          dir=dir, delete=delete)

        try:
            tempfile.write(content.encode('utf-8'))
            tempfile.flush()
            return tempfile
        except BaseException:
            # Was a bare ``except:``; BaseException is equivalent here
            # (cleanup still runs for KeyboardInterrupt etc.) but explicit.
            tempfile.close()
            raise

    def convert_to_pdf(self, filename,
                       header_filename=None, footer_filename=None):
        """Run wkhtmltopdf on *filename* and return the PDF bytes."""
        cmd_options = self.cmd_options.copy()
        # Clobber header_html and footer_html only if filenames are
        # provided. These keys may be in self.cmd_options as hardcoded
        # static files.
        if header_filename is not None:
            cmd_options['header_html'] = header_filename
        if footer_filename is not None:
            cmd_options['footer_html'] = footer_filename
        return wkhtmltopdf(pages=[filename], **cmd_options)

    @property
    def rendered_content(self):
        """Returns the freshly rendered content for the template and context
        described by the PDFResponse.

        This *does not* set the final content of the response. To set the
        response content, you must either call render(), or set the
        content explicitly using the value of this property.
        """
        # In debug mode keep the intermediate HTML files for inspection.
        debug = getattr(settings, 'WKHTMLTOPDF_DEBUG', settings.DEBUG)

        input_file = header_file = footer_file = None
        header_filename = footer_filename = None

        try:
            input_file = self.render_to_temporary_file(
                template_name=self.template_name,
                prefix='wkhtmltopdf', suffix='.html',
                delete=(not debug)
            )

            if self.header_template:
                header_file = self.render_to_temporary_file(
                    template_name=self.header_template,
                    prefix='wkhtmltopdf', suffix='.html',
                    delete=(not debug)
                )
                header_filename = header_file.name

            if self.footer_template:
                footer_file = self.render_to_temporary_file(
                    template_name=self.footer_template,
                    prefix='wkhtmltopdf', suffix='.html',
                    delete=(not debug)
                )
                footer_filename = footer_file.name

            return self.convert_to_pdf(filename=input_file.name,
                                       header_filename=header_filename,
                                       footer_filename=footer_filename)
        finally:
            # Clean up temporary files
            for f in filter(None, (input_file, header_file, footer_file)):
                f.close()
class PDFTemplateView(TemplateView):
    """Class-based view for HTML templates rendered to PDF."""

    # Filename for downloaded PDF. If None, the response is inline.
    filename = 'rendered_pdf.pdf'

    # Send file as attachment. If True render content in the browser.
    show_content_in_browser = False

    # Filenames for the content, header, and footer templates.
    template_name = None
    header_template = None
    footer_template = None

    # TemplateResponse classes for PDF and HTML
    response_class = PDFTemplateResponse
    html_response_class = TemplateResponse

    # Command-line options to pass to wkhtmltopdf
    cmd_options = {
        # 'orientation': 'portrait',
        # 'collate': True,
        # 'quiet': None,
    }

    def __init__(self, *args, **kwargs):
        super(PDFTemplateView, self).__init__(*args, **kwargs)
        # Per-instance copy so mutations never leak into the class attribute.
        self.cmd_options = self.cmd_options.copy()

    def get(self, request, *args, **kwargs):
        # Temporarily swap in the HTML response class when '?as=html' is
        # requested, restoring the original class afterwards.
        saved_response_class = self.response_class
        try:
            if request.GET.get('as', '') == 'html':
                self.response_class = self.html_response_class
            return super(PDFTemplateView, self).get(request, *args, **kwargs)
        finally:
            self.response_class = saved_response_class

    def get_filename(self):
        return self.filename

    def get_cmd_options(self):
        return self.cmd_options

    def render_to_response(self, context, **response_kwargs):
        """
        Returns a PDF response with a template rendered with the given context.
        """
        filename = response_kwargs.pop('filename', None)
        cmd_options = response_kwargs.pop('cmd_options', None)

        if not issubclass(self.response_class, PDFTemplateResponse):
            # Plain HTML response: forward without the PDF-only kwargs.
            return super(PDFTemplateView, self).render_to_response(
                context=context,
                **response_kwargs
            )

        if filename is None:
            filename = self.get_filename()
        if cmd_options is None:
            cmd_options = self.get_cmd_options()
        return super(PDFTemplateView, self).render_to_response(
            context=context, filename=filename,
            show_content_in_browser=self.show_content_in_browser,
            header_template=self.header_template,
            footer_template=self.footer_template,
            cmd_options=cmd_options,
            **response_kwargs
        )
|
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package recurrent
# Module caffe2.python.recurrent
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from future.utils import viewitems, viewkeys
def recurrent_net(
        net, cell_net, inputs, initial_cell_inputs,
        links, timestep=None, scope=None, outputs_with_grads=(0,),
        recompute_blobs_on_backward=None, forward_only=False,
):
    '''Add a RecurrentNetwork operator to `net` that unrolls `cell_net` over
    time, optionally building the per-step backward net as well.

    net: the main net operator should be added to

    cell_net: cell_net which is executed in a recurrent fashion

    inputs: sequences to be fed into the recurrent net. Currently only one input
    is supported. It has to be in a format T x N x (D1...Dk) where T is lengths
    of the sequence. N is a batch size and (D1...Dk) are the rest of dimensions

    initial_cell_inputs: inputs of the cell_net for the 0 timestamp.
    Format for each input is: (cell_net_input_name, external_blob_with_data)

    links: a dictionary from cell_net input names in moment t+1 and
    output names of moment t. Currently we assume that each output becomes
    an input for the next timestep.

    timestep: name of the timestep blob to be used. If not provided "timestep"
    is used.

    scope: Internal blobs are going to be scoped in a format
    <scope_name>/<blob_name>. If not provided we generate a scope name
    automatically

    outputs_with_grads: position indices of output blobs which will receive
    error gradient (from outside recurrent network) during backpropagation

    recompute_blobs_on_backward: specify a list of blobs that will be
    recomputed for backward pass, and thus need not to be stored for each
    forward timestep.

    forward_only: if True, only forward steps are executed

    Returns the RecurrentNetwork outputs, minus the trailing internal
    step-workspaces blob.
    '''
    assert len(inputs) == 1, "Only one input blob is supported so far"

    input_blobs = [str(i[0]) for i in inputs]
    initial_input_blobs = [str(x[1]) for x in initial_cell_inputs]
    op_name = net.NextName('recurrent')

    def s(name):
        # We have to manually scope due to our internal/external blob
        # relationships.
        scope_name = op_name if scope is None else scope
        return "{}/{}".format(str(scope_name), str(name))

    # determine inputs that are considered to be references
    # it is those that are not referred to in inputs or initial_cell_inputs
    known_inputs = [str(b) for b in input_blobs + initial_input_blobs]
    known_inputs += [str(x[0]) for x in initial_cell_inputs]
    if timestep is not None:
        known_inputs.append(str(timestep))
    references = [
        core.BlobReference(b) for b in cell_net.Proto().external_input
        if b not in known_inputs]

    inner_outputs = list(cell_net.Proto().external_output)
    # These gradients are expected to be available during the backward pass
    inner_outputs_map = {o: o + '_grad' for o in inner_outputs}

    # compute the backward pass of the cell net
    if not forward_only:
        backward_ops, backward_mapping = core.GradientRegistry.GetBackwardPass(
            cell_net.Proto().op, inner_outputs_map)
        backward_mapping = {str(k): v for k, v in viewitems(backward_mapping)}

        backward_cell_net = core.Net("RecurrentBackwardStep")
        del backward_cell_net.Proto().op[:]

        if recompute_blobs_on_backward is not None:
            # Insert operators to re-compute the specified blobs.
            # They are added in the same order as for the forward pass, thus
            # the order is correct.
            recompute_blobs_on_backward = {str(b) for b in
                                           recompute_blobs_on_backward}

            for op in cell_net.Proto().op:
                if not recompute_blobs_on_backward.isdisjoint(set(op.output)):
                    backward_cell_net.Proto().op.extend([op])
                    # This fires if other outputs than the declared
                    # are computed by the ops that are recomputed
                    assert set(op.output).issubset(recompute_blobs_on_backward)

        backward_cell_net.Proto().op.extend(backward_ops)
        # compute blobs used but not defined in the backward pass
        backward_ssa, backward_blob_versions = core.get_ssa(
            backward_cell_net.Proto())
        undefined = core.get_undefined_blobs(backward_ssa)

        # also add to the output list the intermediate outputs of fwd_step that
        # are used by backward.
        ssa, blob_versions = core.get_ssa(cell_net.Proto())
        scratches = [
            blob
            for blob, ver in viewitems(blob_versions)
            if (ver > 0 and
                blob in undefined and
                blob not in cell_net.Proto().external_output)
        ]
        backward_cell_net.Proto().external_input.extend(scratches)
        backward_cell_net.Proto().type = 'simple'
    else:
        backward_cell_net = None

    all_inputs = [i[1] for i in inputs] + [
        x[1] for x in initial_cell_inputs] + references
    all_outputs = []

    cell_net.Proto().type = 'simple'

    # Internal arguments used by RecurrentNetwork operator

    # Links are in the format blob_name, recurrent_states, offset.
    # In the moment t we know that corresponding data block is at
    # t + offset position in the recurrent_states tensor
    forward_links = []
    backward_links = []

    # Aliases are used to expose outputs to external world
    # Format (internal_blob, external_blob, offset)
    # Negative offset stands for going from the end,
    # positive - from the beginning
    aliases = []

    # States held inputs to the cell net
    recurrent_states = []

    for cell_input, _ in initial_cell_inputs:
        cell_input = str(cell_input)
        # Recurrent_states is going to be (T + 1) x ...
        # It stores all inputs and outputs of the cell net over time.
        # Or their gradients in the case of the backward pass.
        state = s(cell_input + "_states")
        states_grad = state + "_grad"
        cell_output = links[str(cell_input)]
        forward_links.append((cell_input, state, 0))
        forward_links.append((cell_output, state, 1))

        aliases.append((state, cell_output + "_all", 1))
        aliases.append((state, cell_output + "_last", -1))
        all_outputs.extend([cell_output + "_all", cell_output + "_last"])

        recurrent_states.append(state)

        if backward_cell_net is not None:
            backward_links.append((cell_output + "_grad", states_grad, 1))
            backward_cell_net.Proto().external_input.append(
                str(cell_output) + "_grad")

            recurrent_input_grad = cell_input + "_grad"
            if not backward_blob_versions.get(recurrent_input_grad, 0):
                # If nobody writes to this recurrent input gradient, we need
                # to make sure it gets to the states grad blob after all.
                # We do this by using backward_links which triggers an alias
                # This logic is being used for example in a SumOp case
                backward_links.append(
                    (backward_mapping[cell_input], states_grad, 0))
            else:
                backward_links.append((recurrent_input_grad, states_grad, 0))

    for input_t, input_blob in inputs:
        forward_links.append((str(input_t), str(input_blob), 0))

    if backward_cell_net is not None:
        for input_t, input_blob in inputs:
            backward_links.append((
                backward_mapping[str(input_t)], str(input_blob) + "_grad", 0
            ))
        backward_cell_net.Proto().external_input.extend(
            cell_net.Proto().external_input)
        backward_cell_net.Proto().external_input.extend(
            cell_net.Proto().external_output)

    def unpack_triple(x):
        # Split a list of 3-tuples into three parallel sequences.
        if x:
            a, b, c = zip(*x)
            return a, b, c
        return [], [], []

    # Splitting to separate lists so we can pass them to c++
    # where we ensemble them back
    link_internal, link_external, link_offset = unpack_triple(forward_links)
    alias_src, alias_dst, alias_offset = unpack_triple(aliases)

    recurrent_inputs = [str(x[1]) for x in initial_cell_inputs]

    # Make sure that recurrent gradients accumulate with internal gradients
    # (if a blob in the backward_cell_net receives gradient from both an
    # external connection as well as from within the backward_cell_net,
    # those gradients need to be added together, rather than one overwriting
    # the other)
    if backward_cell_net is not None:
        proto = backward_cell_net.Proto()
        operators = []
        # Drain the op list back-to-front, then re-append each op while
        # rewriting outputs that collide with external inputs into "_accum"
        # blobs followed by an explicit Sum.
        while len(proto.op) > 0:
            op = proto.op[-1]
            proto.op.remove(op)
            operators.append(op)
        for op in operators[::-1]:
            proto.op.extend([op])
            for j, output_blob in enumerate(op.output):
                if output_blob in proto.external_input:
                    # In place operation won't cause issues because it takes
                    # existing value of a blob into account
                    if output_blob in op.input:
                        continue
                    output_blob = core.BlobReference(output_blob)
                    accum_blob = output_blob + "_accum"
                    proto.op[-1].output[j] = str(accum_blob)
                    backward_cell_net.Sum(
                        [output_blob, accum_blob],
                        [output_blob],
                    )

    # NOTE(review): map_to_dual_list appears unused in this function —
    # confirm whether it can be removed.
    def map_to_dual_list(m):
        return [str(x) for x in list(m.keys())] + \
            [str(x) for x in list(m.values())]

    backward_args = {}
    if backward_cell_net is not None:
        backward_mapping_keys = set(viewkeys(backward_mapping))
        backward_link_internal, backward_link_external, backward_link_offset = \
            unpack_triple(backward_links)
        params = [x for x in references if x in backward_mapping_keys]
        param_grads = [
            str(backward_mapping[x])
            for x in references
            if x in backward_mapping_keys
        ]
        if recompute_blobs_on_backward is None:
            recompute_blobs_on_backward = set()
        backward_args = {
            'param': [all_inputs.index(p) for p in params],
            'backward_link_internal': [str(l) for l in backward_link_internal],
            'backward_link_external': [str(l) for l in backward_link_external],
            'backward_link_offset': backward_link_offset,
            'outputs_with_grads': outputs_with_grads,
            'recompute_blobs_on_backward': [
                str(b) for b in recompute_blobs_on_backward
            ],
            'param_grads': param_grads,
        }
        if len(backward_cell_net.Proto().op) != 0:
            backward_args['backward_step_net'] = backward_cell_net.Proto()

    results = net.RecurrentNetwork(
        all_inputs,
        all_outputs + [s("step_workspaces")],
        alias_src=alias_src,
        alias_dst=[str(a) for a in alias_dst],
        alias_offset=alias_offset,
        recurrent_states=recurrent_states,
        initial_recurrent_state_ids=[
            all_inputs.index(i) for i in recurrent_inputs
        ],
        link_internal=[str(l) for l in link_internal],
        link_external=[str(l) for l in link_external],
        link_offset=link_offset,
        enable_rnn_executor=1,
        step_net=cell_net.Proto(),
        timestep="timestep" if timestep is None else str(timestep),
        **backward_args
    )

    # Restore net type since 'rnn' is not recognized outside RNNs
    cell_net.Proto().type = 'simple'

    # The last output is a list of step workspaces,
    # which is only needed internally for gradient propagation
    return results[:-1]
def set_rnn_executor_config(rnn_op, num_threads=None, max_cuda_streams=None):
    """Attach rnn_executor.* tuning arguments to a RecurrentNetwork op."""
    from caffe2.proto import caffe2_pb2
    assert rnn_op.type in {'RecurrentNetwork', 'RecurrentNetworkGradient'}

    def _append_int_arg(name, value):
        # Arguments are namespaced so the executor can pick them out.
        arg = caffe2_pb2.Argument()
        arg.name = "rnn_executor." + name
        arg.i = value
        rnn_op.arg.extend([arg])

    if num_threads is not None:
        _append_int_arg('num_threads', num_threads)
    if max_cuda_streams is not None:
        _append_int_arg('max_cuda_streams', max_cuda_streams)
def retrieve_step_blobs(net, prefix='rnn'):
    '''
    Retrieves blobs from step workspaces (which contain intermediate recurrent
    network computation for each timestep) and puts them in the global
    workspace. This allows access to the contents of this intermediate
    computation in python. Returns the list of extracted blob names.

    net: the net from which the step workspace blobs should be extracted

    prefix: prefix to append to extracted blob names when placing them in the
    global workspace
    '''
    extracted = []
    index = 1
    for op in net.Proto().op:
        if op.type != "RecurrentNetwork":
            continue
        fetch_target = prefix + "_" + str(index)
        index += 1
        # The RecurrentNetwork op's final output is its step-workspaces blob.
        scratch_workspaces_blob = op.output[-1]
        workspace.RunOperatorOnce(
            core.CreateOperator(
                "RecurrentNetworkBlobFetcher",
                [scratch_workspaces_blob],
                [fetch_target],
                prefix=prefix
            )
        )
        extracted += workspace.FetchBlob(fetch_target).tolist()
    return extracted
|
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008 Daniel Moisset, Ricardo Quesada, Rayentray Tappa, Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Implementation of Grid3DAction actions
'''
__docformat__ = 'restructuredtext'
import math
import random
from cocos.director import director
from cocos.euclid import *
from basegrid_actions import *
rr = random.randrange
__all__ = [
'Waves3D', # 3d actions that modifies the z-coordinate
'FlipX3D',
'FlipY3D',
'Lens3D',
'Shaky3D',
'Ripple3D',
'Liquid', # 3d actions that don't modify the z-coordinate
'Waves',
'Twirl',
]
class Waves3D( Grid3DAction ):
    '''Simulates waves using the math.sin() function in the z-axis.

    The x and y coordinates remain unmodified.

    Example::

       scene.do( Waves3D( waves=5, amplitude=40, grid=(16,16), duration=10) )
    '''

    def init(self, waves=4, amplitude=20, *args, **kw):
        '''
        :Parameters:
            `waves` : int
                Number of waves (2 * pi) that the action will perform. Default is 4
            `amplitude` : int
                Wave amplitude (height). Default is 20
        '''
        super(Waves3D, self).init(*args, **kw)

        #: Total number of waves to perform
        self.waves = waves
        #: wave height
        self.amplitude = amplitude
        #: amplitude rate. Default: 1.0.
        #: This value is modified by other actions like `AccelAmplitude`.
        self.amplitude_rate = 1.0

    def update(self, t):
        # Hoist the per-frame constants out of the vertex loop.
        phase = t * math.pi * self.waves * 2
        height = self.amplitude * self.amplitude_rate
        for col in xrange(self.grid.x + 1):
            for row in xrange(self.grid.y + 1):
                x, y, z = self.get_original_vertex(col, row)
                z += math.sin(phase + (y + x) * .01) * height
                self.set_vertex(col, row, (x, y, z))
class FlipX3D( Grid3DAction ):
    '''FlipX3D flips the screen using the Y-axis as a pivot.'''

    def init(self, grid=(1,1), *args, **kw):
        # The effect is defined on a single quad only.
        if grid != (1,1):
            raise GridException("Invalid grid size.")
        super(FlipX3D,self).init(grid=grid,*args,**kw)

    def update( self, t ):
        # t runs 0 -> 1 over the action's duration, so angle sweeps 0 -> pi.
        angle = math.pi * t # 180 degrees
        mz = math.sin( angle )
        angle = angle / 2.0 # x calculates degrees from 0 to 90
        mx = math.cos( angle )

        x0,y,z = self.get_original_vertex(1,1)
        x1,y,z = self.get_original_vertex(0,0)

        # Decide corner roles from the current grid orientation.
        if x0 > x1:
            # Normal Grid
            a = (0,0)
            b = (0,1)
            c = (1,0)
            d = (1,1)
            x = x0
        else:
            # Reversed Grid
            c = (0,0)
            d = (0,1)
            a = (1,0)
            b = (1,1)
            x = x1

        diff_x = x - x * mx
        # NOTE(review): floor division floors the float result here; upstream
        # cocos2d uses true division ("/ 4.0") — confirm this is intended.
        diff_z = abs( (x * mz) // 4.0 )

        # bottom-left
        x,y,z = self.get_original_vertex(*a)
        self.set_vertex(a[0],a[1],(diff_x,y,z+diff_z))

        # upper-left
        x,y,z = self.get_original_vertex(*b)
        self.set_vertex(b[0],b[1],(diff_x,y,z+diff_z))

        # bottom-right
        x,y,z = self.get_original_vertex(*c)
        self.set_vertex(c[0],c[1],(x-diff_x,y,z-diff_z))

        # upper-right
        x,y,z = self.get_original_vertex(*d)
        self.set_vertex(d[0],d[1],(x-diff_x,y,z-diff_z))
class FlipY3D( Grid3DAction ):
    '''FlipY3D flips the screen using the X-axis as a pivot.'''

    def init(self, grid=(1,1), *args, **kw):
        # The effect is defined on a single quad only.
        if grid != (1,1):
            raise GridException("Invalid grid size.")
        super(FlipY3D,self).init(grid=grid,*args,**kw)

    def update( self, t ):
        # t runs 0 -> 1 over the action's duration, so angle sweeps 0 -> pi.
        angle = math.pi * t # 180 degrees
        mz = math.sin( angle )
        angle = angle / 2.0 # x calculates degrees from 0 to 90
        my = math.cos( angle )

        x,y0,z = self.get_original_vertex(1,1)
        x,y1,z = self.get_original_vertex(0,0)

        # Decide corner roles from the current grid orientation.
        if y0 > y1:
            # Normal Grid
            a = (0,0)
            b = (0,1)
            c = (1,0)
            d = (1,1)
            y = y0
        else:
            # Reversed Grid
            b = (0,0)
            a = (0,1)
            d = (1,0)
            c = (1,1)
            y = y1

        diff_y = y - y * my
        # NOTE(review): floor division floors the float result here; upstream
        # cocos2d uses true division ("/ 4.0") — confirm this is intended.
        diff_z = abs( (y * mz) // 4.0 )

        # bottom-left
        x,y,z = self.get_original_vertex(*a)
        self.set_vertex(a[0],a[1],(x,diff_y,z+diff_z))

        # upper-left
        x,y,z = self.get_original_vertex(*b)
        self.set_vertex(b[0],b[1],(x,y-diff_y,z-diff_z))

        # bottom-right
        x,y,z = self.get_original_vertex(*c)
        self.set_vertex(c[0],c[1],(x,diff_y,z+diff_z))

        # upper-right
        x,y,z = self.get_original_vertex(*d)
        self.set_vertex(d[0],d[1],(x,y-diff_y,z-diff_z))
class Lens3D( Grid3DAction ):
    '''Lens simulates a Lens / Magnifying glass effect.

    It modifies the z-coordinate while the x and y remains unmodified.

    Example::

       scene.do( Lens3D(center=(320,240), radius=150, grid=(16,16), duration=10) )
    '''

    def init(self, center=(-1,-1), radius=160, lens_effect=0.7, *args, **kw):
        '''
        :Parameters:
            `center` : (int,int)
                Center of the lens. Default: (win_size_width /2, win_size_height /2 )
            `radius` : int
                Radius of the lens.
            `lens_effect` : float
                How strong is the lens effect. Default: 0.7. 0 is no effect at all, 1 is a very strong lens effect.
        '''
        super(Lens3D,self).init( *args, **kw)

        x,y = director.get_window_size()
        # Sentinel (-1,-1) means "center of the window".
        if center==(-1,-1):
            center=(x//2, y//2)

        #: position of the center of the len. Type: (int,int).
        #: This value can be modified by other actions, like `JumpBy` to simulate a jumping lens
        self.position = Point2( center[0]+1, center[1]+1 )
        #: radius of the lens. Type: float
        self.radius = radius
        #: lens effect factor. Type: float
        self.lens_effect = lens_effect

        self._last_position = (-1000,-1000) # dirty vrbl

    def update( self, t ):
        # Only recompute the mesh when the lens center moved since last frame.
        if self.position != self._last_position:
            for i in xrange(0, self.grid.x+1):
                for j in xrange(0, self.grid.y+1):
                    x,y,z = self.get_original_vertex(i,j)
                    p = Point2( x,y )
                    vect = self.position - p
                    r = abs(vect)

                    if r < self.radius:
                        r = self.radius - r
                        pre_log = r/self.radius
                        if pre_log == 0:
                            # Avoid math.log(0) domain error at the rim.
                            pre_log = 0.001
                        l = math.log( pre_log )*self.lens_effect
                        new_r = math.exp( l ) * self.radius

                        vect.normalize()
                        new_vect = vect * new_r

                        z += abs(new_vect) * self.lens_effect # magic vrbl

                    # set all vertex, not only the on the changed
                    # since we want to 'fix' possible moved vertex
                    self.set_vertex( i,j, (x,y,z) )
            self._last_position = self.position
class Ripple3D( Grid3DAction ):
    '''Simulates a ripple (radial wave) effect.

    The wave amplitude decays with the distance to the ripple's center.
    Only the z-coordinate changes; x and y remain unmodified.

    Example::

       scene.do( Ripple3D(center=(320,240), radius=240, waves=15, amplitude=60, duration=20, grid=(32,24) ) )
    '''

    def init(self, center=(-1,-1), radius=240, waves=15, amplitude=60,
             *args, **kw):
        '''
        :Parameters:
            `center` : (int,int)
                Center of the ripple. Default: (win_size_width /2, win_size_height /2 )
            `radius` : int
                Radius of the ripple. Default: 240
            `waves` : int
                Number of waves (2 * pi) that the action will perform. Default: 15
            `amplitude` : int
                Wave amplitude (height). Default is 60
        '''
        super(Ripple3D, self).init(*args, **kw)

        win_w, win_h = director.get_window_size()
        # Sentinel (-1,-1) means "center of the window".
        if center == (-1,-1):
            center = (win_w//2, win_h//2)

        #: Center of the ripple. Type: (int,int).
        #: Other actions (e.g. `JumpBy`) may move this point to animate the ripple.
        self.position = Point2(center[0]+1, center[1]+1)
        #: radius of the ripple. Type: float
        self.radius = radius
        #: number of waves. Type: int
        self.waves = waves
        #: wave height
        self.amplitude = amplitude
        #: amplitude rate. Default: 1.0.
        #: This value is modified by other actions like `AccelAmplitude`.
        self.amplitude_rate = 1.0

    def update(self, t):
        phase = t * math.pi * self.waves * 2
        strength = self.amplitude * self.amplitude_rate
        for col in xrange(0, self.grid.x+1):
            for row in xrange(0, self.grid.y+1):
                x, y, z = self.get_original_vertex(col, row)
                dist = abs(self.position - Point2(x, y))
                if dist < self.radius:
                    dist = self.radius - dist
                    # Quadratic falloff toward the rim of the ripple.
                    falloff = pow(dist / self.radius, 2)
                    z += math.sin(phase + dist * 0.1) * strength * falloff
                self.set_vertex(col, row, (x, y, z))
class Shaky3D( Grid3DAction):
    '''Shaky simulates an earthquake by modifying randomly the x, y and z coordinates of each vertex.

    Example::

       scene.do( Shaky3D( randrange=6, grid=(4,4), duration=10) )
    '''

    def init(self, randrange=6, *args, **kw):
        '''
        :Parameters:
            `randrange` : int
                Number that will be used in random.randrange( -randrange, randrange) to do the effect
        '''
        super(Shaky3D, self).init(*args, **kw)

        #: random range of the shaky effect
        self.randrange = randrange

    def update(self, t):
        jitter = self.randrange
        for col in xrange(self.grid.x + 1):
            for row in xrange(self.grid.y + 1):
                x, y, z = self.get_original_vertex(col, row)
                # One random offset per axis, drawn in x, y, z order
                # (same draw order as before, so RNG streams match).
                self.set_vertex(col, row,
                                (x + rr(-jitter, jitter + 1),
                                 y + rr(-jitter, jitter + 1),
                                 z + rr(-jitter, jitter + 1)))
class Liquid( Grid3DAction ):
    '''Simulates a liquid effect using the math.sin() function modifying the x and y coordinates.
    The z coordinate remains unmodified.

    Example::

        scene.do( Liquid( waves=5, amplitude=40, grid=(16,16), duration=10) )
    '''

    def init( self, waves=4, amplitude=20, *args, **kw ):
        '''
        :Parameters:
            `waves` : int
                Number of waves (2 * pi) that the action will perform. Default is 4
            `amplitude` : int
                Wave amplitude (height). Default is 20
        '''
        super(Liquid, self).init( *args, **kw )
        #: total number of waves
        self.waves = waves
        #: amplitude of the waves
        self.amplitude = amplitude
        #: amplitude rate. Default: 1.0.
        #: This value is modified by other actions like `AccelAmplitude`.
        self.amplitude_rate = 1.0

    def update(self, t):
        # Only interior vertices move (ranges start at 1 and stop before the
        # last index), so the border of the grid stays pinned to the screen.
        phase = t * math.pi * self.waves * 2
        for col in xrange(1, self.grid.x):
            for row in xrange(1, self.grid.y):
                x, y, z = self.get_original_vertex(col, row)
                new_x = x + (math.sin(phase + x * .01) * self.amplitude * self.amplitude_rate)
                new_y = y + (math.sin(phase + y * .01) * self.amplitude * self.amplitude_rate)
                self.set_vertex(col, row, (new_x, new_y, z))
class Waves( Grid3DAction ):
    '''Simulates waves using the math.sin() function both in the vertical and horizontal axis.
    The z coordinate is not modified.

    Example::

        scene.do( Waves( waves=4, amplitude=20, hsin=False, vsin=True, grid=(16,16), duration=10) )
    '''

    def init( self, waves=4, amplitude=20, hsin=True, vsin=True, *args, **kw ):
        '''Initializes the Waves actions

        :Parameters:
            `waves` : int
                Number of waves (2 * pi) that the action will perform. Default is 4
            `amplitude` : int
                Wave amplitude (height). Default is 20
            `hsin` : bool
                whether or not in will perform horizontal waves. Default is True
            `vsin` : bool
                whether or not in will perform vertical waves. Default is True
        '''
        super(Waves, self).init( *args, **kw )
        #: whether or not it will do horizontal waves
        self.hsin = hsin
        #: whether or not it will do vertical waves
        self.vsin = vsin
        #: total number of waves
        self.waves=waves
        #: amplitude of the waves
        self.amplitude=amplitude
        #: amplitude rate. Default: 1.0
        #: This value is modified by other actions like `AccelAmplitude`.
        self.amplitude_rate = 1.0

    def update( self, t ):
        for i in xrange(0, self.grid.x+1):
            for j in xrange(0, self.grid.y+1):
                x,y,z = self.get_original_vertex(i,j)
                # NOTE(review): `vsin` gates the x displacement (driven by y)
                # while `hsin` gates the y displacement (driven by x); the
                # flag names read as swapped relative to the docstring —
                # confirm against upstream cocos2d before changing.
                if self.vsin:
                    xpos = (x + (math.sin(t*math.pi*self.waves*2 + y * .01) * self.amplitude * self.amplitude_rate))
                else:
                    xpos = x
                if self.hsin:
                    ypos = (y + (math.sin(t*math.pi*self.waves*2 + x * .01) * self.amplitude * self.amplitude_rate))
                else:
                    ypos = y
                self.set_vertex( i,j, (xpos,ypos,z) )
class Twirl( Grid3DAction ):
    '''Simulates a twirl effect modifying the x and y coordinates.
    The z coordinate is not modified.

    Example::

        scene.do( Twirl( center=(320,240), twirls=5, amplitude=1, grid=(16,12), duration=10) )
    '''

    def init( self, center=(-1,-1), twirls=4, amplitude=1, *args, **kw ):
        '''
        :Parameters:
            `twirls` : int
                Number of twirls (2 * pi) that the action will perform. Default is 4
            `amplitude` : float
                Twirl amplitude. Default is 1
            `center` : (int,int)
                Center of the twirl in x,y coordinates. Default: center of the screen
        '''
        super(Twirl, self).init( *args, **kw )
        x,y = director.get_window_size()
        # (-1,-1) is a sentinel meaning "center of the window"
        if center==(-1,-1):
            center=(x//2, y//2)
        #: position of the center of the Twirl. Type: (int,int).
        #: This value can be modified by other actions, like `JumpBy` to simulate a jumping Twirl
        self.position = Point2( center[0]+1, center[1]+1 )
        #: total number of twirls
        self.twirls = twirls
        #: amplitude of the twirls
        self.amplitude=amplitude
        #: amplitude rate. Default: 1.0.
        #: This value is modified by other actions like `AccelAmplitude`.
        self.amplitude_rate = 1.0

    def update( self, t ):
        cx = self.position.x
        cy = self.position.y
        for i in xrange(0, self.grid.x+1):
            for j in xrange(0, self.grid.y+1):
                x,y,z = self.get_original_vertex(i,j)
                # Rotation angle grows with the vertex's distance from the
                # *grid* center (measured in grid cells, not pixels) and
                # oscillates over time.
                r = math.sqrt( (i-self.grid.x/2.0) ** 2 + (j-self.grid.y/2.0) ** 2 )
                amplitude = 0.1 * self.amplitude * self.amplitude_rate
                a = r * math.cos( math.pi/2.0 + t * math.pi * self.twirls * 2 ) * amplitude
                # Rotate the vertex around (cx, cy) by angle `a`.
                dx = math.sin(a) * (y-cy) + math.cos(a) * (x-cx)
                dy = math.cos(a) * (y-cy) - math.sin(a) * (x-cx)
                self.set_vertex( i,j, (cx+dx, cy+dy,z) )
|
|
import os
import urllib2
from urlparse import urlparse
from cStringIO import StringIO
from PIL import Image
import markdown
import datetime
from time import strftime
from hashlib import md5
import uuid
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import slugify
from django.utils.encoding import smart_unicode, smart_str
from django.conf import settings
from django.core.urlresolvers import get_script_prefix
from django.utils.encoding import iri_to_uri
from .managers import TopicManager, EntryManager
from .utils import create_thumbnail, guid_generator
CONTENT_FORMAT_CHOICES = ((u'markdown', u'Markdown'), (u'html', u'Raw HTML'),)
IS_ACTIVE_CHOICES = ((True, 'Published'), (False, 'Draft'))
CAN_SUBMIT_CHOICES = ((True, 'Everyone'), (False, 'Only users I allow.'))
IS_PUBLIC_CHOICES = ((True, 'Everyone'), (False, 'No one'))
def upload_topic(instance, filename):
    """Storage path for a Topic's original image, namespaced by owner GUID."""
    return 'user_uploads/%s/topics/%s' % (guid_generator(instance.user.id), filename)

def upload_topic_thumb(instance, filename):
    """Storage path for a Topic's thumbnail image."""
    return 'user_uploads/%s/topics/thumbnails/%s' % (guid_generator(instance.user.id), filename)

def media_original(instance, filename):
    """Storage path for a Media item's original file."""
    return 'user_uploads/%s/media/%s' % (guid_generator(instance.user.id), filename)

def media_thumb(instance, filename):
    """Storage path for a Media item's thumbnail."""
    return 'user_uploads/%s/media/thumbnails/%s' % (guid_generator(instance.user.id), filename)
class Topic(models.Model):
    """A user-owned, optionally public collection of entries."""
    title = models.CharField(max_length=255)
    slug = models.SlugField(max_length=255, unique=True)
    description = models.TextField(max_length=1020)
    pub_date = models.DateTimeField(auto_now_add=True)
    # `user` is the owner; `members` are additional users with access.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='owner')
    members = models.ManyToManyField(settings.AUTH_USER_MODEL, db_table='r_Topic_Members', blank=True, related_name='topics', related_query_name='topic')
    is_public = models.BooleanField(help_text=_("Can everyone see this Topic?"), choices=IS_PUBLIC_CHOICES, default=True)
    image = models.ImageField(upload_to=upload_topic, blank=True)
    thumbnail = models.ImageField(upload_to=upload_topic_thumb, blank=True)
    # 16-hex-char identifier generated on first save (column allows 32).
    guid = models.CharField(max_length=32, unique=True)
    objects = TopicManager()

    class Meta:
        db_table = 'r_Topic'
        verbose_name_plural = 'topics'
        ordering = ['-title']
        get_latest_by = 'pub_date'

    def __unicode__(self):
        return "%s" % (self.title,)

    def entry_count(self):
        # Number of entries attached to this topic.
        entry_total = Entry.objects.filter(topic=self).count()
        return entry_total

    def save(self, *args, **kwargs):
        # Build a thumbnail lazily when an image exists without one.
        if self.image and not self.thumbnail:
            create_thumbnail(self.image)
        # Slug and GUID are generated only on first save; later title
        # changes do not re-slug (keeps URLs stable).
        if not self.id:
            self.slug = slugify(self.title)
            #GUID Generation
            guid_base = str(uuid.uuid4())
            guid_encoded = guid_base.encode('ascii', 'ignore')
            guid = md5(guid_encoded).hexdigest()[:16]
            self.guid = guid
        super(Topic, self).save(*args, **kwargs)
class Media(models.Model):
    """An uploaded image (or an external URL) owned by a user."""
    title = models.CharField(max_length=510)
    pub_date = models.DateTimeField(auto_now_add=True)
    description = models.CharField(max_length=510, blank=True)
    url = models.URLField(blank=True)
    image = models.ImageField(upload_to=media_original, blank=True, null=True)
    thumbnail = models.ImageField(upload_to=media_thumb, blank=True, null=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    guid = models.CharField(max_length=32, unique=True)

    class Meta:
        db_table = 'r_Media'
        verbose_name_plural = 'Media'

    def __unicode__(self):
        return self.title

    def get_absolute_url(self):
        # Prefer the locally uploaded image; fall back to the external URL.
        if self.image:
            return "%s" % (self.image.url)
        else:
            return "%s" % (self.url)

    def save(self, *args, **kwargs):
        """Generate a thumbnail/GUID as needed, then save.

        Fix: accepts and forwards ``*args, **kwargs`` so Django features
        such as ``save(using=...)`` and ``save(update_fields=...)`` work
        (the old signature ``save(self)`` silently broke them).
        """
        if self.image:
            # NOTE(review): Topic.save() calls create_thumbnail(self.image)
            # while here the model instance itself is passed — confirm which
            # signature .utils.create_thumbnail actually expects.
            create_thumbnail(self)
        # GUID generated only for brand-new rows.
        if not self.id:
            guid_base = str(uuid.uuid4())
            guid_encoded = guid_base.encode('ascii', 'ignore')
            guid = md5(guid_encoded).hexdigest()[:16]
            self.guid = guid
        super(Media, self).save(*args, **kwargs)
class EntryType(models.Model):
    """A user-defined kind of entry (referenced by Entry.post_type)."""
    title = models.CharField(max_length=510)
    slug = models.SlugField(max_length=510, unique=True)
    guid = models.CharField(max_length=32, unique=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL)

    class Meta:
        db_table = 'r_EntryType'
        verbose_name_plural = 'Entry Types'
        ordering = ('-title',)

    def __unicode__(self):
        return self.title

    def entry_count(self):
        # Number of entries using this type.
        entry_total = Entry.objects.filter(post_type=self).count()
        return entry_total

    def save(self, *args, **kwargs):
        # GUID is generated once, on the first save that lacks one.
        if not self.guid:
            #GUID Generation
            guid_base = str(uuid.uuid4())
            guid_encoded = guid_base.encode('ascii', 'ignore')
            guid = md5(guid_encoded).hexdigest()[:16]
            self.guid = guid
        super(EntryType, self).save(*args, **kwargs)
class Entry(models.Model):
    """A blog entry (post or page) owned by a user."""
    title = models.CharField(max_length=510)
    slug = models.SlugField(max_length=510, unique_for_date='pub_date')
    url = models.CharField(max_length=255, blank=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    topic = models.ManyToManyField(Topic, db_table='r_Entry_Topics', blank=True)
    pub_date = models.DateTimeField(verbose_name=_("Publication date"), default=datetime.datetime.now)
    is_active = models.BooleanField(help_text=_("This should be checked for live entries"), choices=IS_ACTIVE_CHOICES, default=False)
    post_type = models.ForeignKey(EntryType, blank=True, null=True, default=1)
    content_format = models.CharField(choices=CONTENT_FORMAT_CHOICES, max_length=25, default='markdown')
    deck = models.TextField(_('deck'), blank=True)
    deck_html = models.TextField(blank=True)
    body = models.TextField(_('body'))
    body_html = models.TextField()
    image = models.ForeignKey(Media, blank=True, null=True)
    guid = models.CharField(max_length=32, unique=True)
    objects = EntryManager()

    class Meta:
        db_table = 'r_Entry'
        verbose_name_plural = 'entries'
        ordering = ('-pub_date',)
        get_latest_by = 'pub_date'
        unique_together = (('slug','user'),)

    def __unicode__(self):
        return self.title

    def Create_Draft(self):
        """Snapshot the current state of this entry into a new Draft row."""
        DraftInstance = Draft(
            entry=self,
            title=self.title,
            slug=self.slug,
            user=self.user,
            deck=self.deck,
            deck_html=self.deck_html,
            body=self.body,
            body_html=self.body_html,
            content_format=self.content_format
        )
        DraftInstance.save()
        return DraftInstance

    def get_absolute_url(self):
        # Pages live at their explicit URL; posts at /YYYY/mm/slug/.
        # NOTE(review): post_type is nullable, so entries without a type
        # would raise AttributeError here — confirm intended.
        if self.post_type.slug == 'page':
            return iri_to_uri(get_script_prefix().rstrip('/') + self.url)
        else:
            return "/%s/%s/" % (self.pub_date.strftime("%Y/%m").lower(), self.slug)

    def is_published(self):
        """True when the entry is active and its publication date has passed."""
        return self.is_active and self.pub_date <= datetime.datetime.now()
    is_published.boolean = True

    def save(self, *args, **kwargs):
        """Snapshot a draft, backfill required fields, render HTML, save.

        Fixes:
        * the ``pub_date`` fallback now *calls* ``datetime.datetime.now()``
          (the old code assigned the function object itself);
        * the untitled fallback uses ``self.guid``, which is always set by
          that point (the old code read a local ``guid`` that only existed
          when a GUID had just been generated, raising NameError otherwise).
        """
        if self.id:
            # Preserve the previous revision before overwriting it.
            self.Create_Draft()
        if not self.body:
            self.body = 'No text entered.'
            self.body_html = 'No text entered.'
        if not self.guid:
            #GUID Generation
            guid_base = "%s-%s-%s" % (self.user, self.pub_date, self.title)
            guid_encoded = guid_base.encode('ascii', 'ignore')
            self.guid = md5(guid_encoded).hexdigest()[:16]
        if not self.pub_date:
            self.pub_date = datetime.datetime.now()
        # Render HTML from the source text on every save.
        if self.content_format == u'markdown':
            self.deck_html = markdown.markdown(smart_unicode(self.deck))
            self.body_html = markdown.markdown(smart_unicode(self.body))
        else:
            self.body_html = self.body
            self.deck_html = self.deck
        if not self.title:
            # Untitled entries fall back to their GUID as a title; the
            # slug is then derived from the title below as usual.
            self.title = self.guid
        self.slug = slugify(self.title)
        super(Entry, self).save(*args, **kwargs)
class Draft(models.Model):
    """A point-in-time snapshot of an Entry, created by Entry.save()."""
    entry = models.ForeignKey(Entry)
    title = models.CharField(max_length=510, default='Untitled')
    last_edit = models.DateTimeField(verbose_name=_("Edit Date"), help_text=_("Last time draft was saved."), default=datetime.datetime.now)
    # NOTE(review): unique_for_date references 'pub_date', but Draft has no
    # pub_date field — confirm whether this should be 'last_edit'.
    slug = models.SlugField(max_length=300, unique_for_date='pub_date')
    content_format = models.CharField(choices=CONTENT_FORMAT_CHOICES, max_length=25, default='markdown')
    deck = models.TextField(_('summary'), blank=True)
    deck_html = models.TextField(blank=True)
    body = models.TextField(_('body'))
    body_html = models.TextField()
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    guid = models.CharField(max_length=32, unique=True)

    class Meta:
        db_table = 'r_Draft'
        verbose_name_plural = 'drafts'
        ordering = ('-title',)
        get_latest_by = 'last_edit'

    def __unicode__(self):
        return self.title

    def save(self, *args, **kwargs):
        # GUID generated once; includes a UUID so two drafts of the same
        # entry saved at the same time still differ.
        if not self.guid:
            #GUID Generation
            guid_base = "%s-%s-%s" % (str(uuid.uuid4()), self.user, self.last_edit)
            guid_encoded = guid_base.encode('ascii', 'ignore')
            guid = md5(guid_encoded).hexdigest()[:16]
            self.guid = guid
        # Render HTML from the source text on every save.
        if self.content_format == u'markdown':
            self.deck_html = markdown.markdown(smart_unicode(self.deck))
            self.body_html = markdown.markdown(smart_unicode(self.body))
        else:
            self.deck_html = self.deck
            self.body_html = self.body
        super(Draft, self).save(*args, **kwargs)
class ReadMore(models.Model):
    """An external 'further reading' link attached to an Entry."""
    entry = models.ForeignKey(Entry)
    guid = models.CharField(max_length=32, unique=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    pub_date = models.DateTimeField(verbose_name=_("Date Submitted"), default=datetime.datetime.now)
    url = models.URLField(max_length=255)
    title = models.CharField(max_length=510, default='Untitled')
    summary = models.TextField(_('summary'), blank=True)
    summary_html = models.TextField(blank=True)

    class Meta:
        db_table = 'r_ReadMore'
        verbose_name_plural = 'Entry Links'
        ordering = ('-title',)

    def __unicode__(self):
        return self.title

    def save(self, *args, **kwargs):
        # GUID generated once, on first save.
        if not self.guid:
            #GUID Generation
            guid_base = str(uuid.uuid4())
            guid_encoded = guid_base.encode('ascii', 'ignore')
            guid = md5(guid_encoded).hexdigest()[:16]
            self.guid = guid
        # summary_html is re-rendered from markdown on every save.
        self.summary_html = markdown.markdown(smart_unicode(self.summary))
        super(ReadMore, self).save(*args, **kwargs)
|
|
from __future__ import print_function
from past.builtins import xrange
from io import open
import vstruct
from vstruct.primitives import *
#const tag descriptions from wikipedia
#Tag byte Additional bytes Description of constant
#1 2+x bytes utf-8 string
#3 4 bytes Integer: a signed 32-bit two's complement number in big-endian format
#4 4 bytes Float: a 32-bit single-precision IEEE 754 floating-point number
#5 8 bytes Long: a signed 64-bit two's complement number in big-endian format (takes two slots in the constant pool table)
#6 8 bytes Double: a 64-bit double-precision IEEE 754 floating-point number (takes two slots in the constant pool table)
#7 2 bytes Class reference: an index within the constant pool to a UTF-8 string containing the fully qualified class name (in internal format)
#8 2 bytes String reference: an index within the constant pool to a UTF-8 string
#9 4 bytes Field reference: two indexes within the constant pool, the first pointing to a Class reference, the second to a Name and Type descriptor.
#10 4 bytes Method reference: two indexes within the constant pool, the first pointing to a Class reference, the second to a Name and Type descriptor.
#11 4 bytes Interface method reference: two indexes within the constant pool, the first pointing to a Class reference, the second to a Name and Type descriptor.
#12 4 bytes Name and type descriptor: two indexes to UTF-8 strings within the constant pool, the first representing a name (identifier) and the second a specially encoded type descriptor.
class ConstPoolStr(vstruct.VStruct):
    """CONSTANT_Utf8 constant-pool entry: a big-endian u16 byte length
    followed by the string bytes (decoded as UTF-8)."""

    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.size = v_uint16(bigend=True)
        self.strbytes = v_wstr(encode='utf8')

    def pcb_size(self):
        # Parse callback: once the length prefix has been read, size the
        # string field so parsing consumes exactly `size` bytes.
        self.vsGetField('strbytes').vsSetLength( self.size )
# Maps a constant-pool tag byte to the vstruct class used to parse the
# bytes that follow it.  Float (4) and Double (6) entries are read as raw
# unsigned integers (the IEEE-754 bit pattern is not decoded to a Python
# float); tags 7/8 are u16 pool indexes, tags 9-12 are two packed u16s.
# NOTE(review): tags 15/16/18 (MethodHandle/MethodType/InvokeDynamic,
# defined below) have no entry here, so class files containing them will
# raise in ConstPoolInfo.pcb_tag().
tag_classes = {
    1: ConstPoolStr,
    3: v_uint32,
    4: v_uint32,
    5: v_uint64,
    6: v_uint64,
    7: v_uint16,
    8: v_uint16,
    9: v_uint32,
    10: v_uint32,
    11: v_uint32,
    12: v_uint32,
}

# Constant Tag Types
CONSTANT_Utf8               = 1
CONSTANT_Integer            = 3
CONSTANT_Float              = 4
CONSTANT_Long               = 5
CONSTANT_Double             = 6
CONSTANT_Class              = 7
CONSTANT_String             = 8
CONSTANT_Fieldref           = 9
CONSTANT_Methodref          = 10
CONSTANT_InterfaceMethodref = 11
CONSTANT_NameAndType        = 12
CONSTANT_MethodHandle       = 15
CONSTANT_MethodType         = 16
CONSTANT_InvokeDynamic      = 18

# Access Flags Values
ACC_PUBLIC      = 0x0001  # Declared public; may be accessed from outside its package.
ACC_PRIVATE     = 0x0002  # Declared private; usable only within the defining class.
ACC_PROTECTED   = 0x0004  # Declared protected; may be accessed within subclasses.
ACC_STATIC      = 0x0008  # Declared static.
ACC_FINAL       = 0x0010  # Declared final; never directly assigned to after object construction.
ACC_VOLATILE    = 0x0040  # Declared volatile; cannot be cached.
ACC_TRANSIENT   = 0x0080  # Declared transient; not written or read by a persistent object manager.
ACC_SYNTHETIC   = 0x1000  # Declared synthetic; not present in the source code.
ACC_ENUM        = 0x4000  # Declared as an element of an enum.
class ConstPoolInfo(vstruct.VStruct):
    """A single constant-pool entry: a one-byte tag followed by
    tag-specific data (see tag_classes for the layouts)."""

    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.tag = v_uint8()
        self.data = vstruct.VStruct()

    def pcb_tag(self):
        # Parse callback: once the tag byte is known, attach the matching
        # value structure so parsing continues with the right layout.
        cls = tag_classes.get(self.tag)
        if cls is None:  # fix: identity comparison, not `== None`
            raise Exception('Unknown ConstPoolInfo Tag: %s' % self.tag)
        self.data.tagval = cls()
class AttributeInfo(vstruct.VStruct):
    """attribute_info structure: pool index of the attribute name, a
    length, then `attribute_length` bytes of opaque payload."""

    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.attribute_name_index = v_uint16(bigend=True)
        self.attribute_length = v_uint32(bigend=True)
        self.attribute = v_bytes()

    def pcb_attribute_length(self):
        # Size the opaque payload once its length has been parsed.
        self.vsGetField('attribute').vsSetLength( self.attribute_length )
class FieldInfo(vstruct.VStruct):
    """field_info structure: flags, name/descriptor pool indexes, and a
    counted list of attributes."""

    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.access_flags = v_uint16(bigend=True)
        self.name_index = v_uint16(bigend=True)
        self.descriptor_index = v_uint16(bigend=True)
        self.attributes_count = v_uint16(bigend=True)
        self.attributes = vstruct.VArray()

    def pcb_attributes_count(self):
        # Grow the attribute array once the count has been parsed.
        self.attributes.vsAddElements( self.attributes_count, AttributeInfo )
class MethodInfo(vstruct.VStruct):
    """method_info structure; layout is identical to field_info (flags,
    name/descriptor pool indexes, counted attribute list)."""

    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.access_flags = v_uint16(bigend=True)
        self.name_index = v_uint16(bigend=True)
        self.descriptor_index = v_uint16(bigend=True)
        self.attributes_count = v_uint16(bigend=True)
        self.attributes = vstruct.VArray()

    def pcb_attributes_count(self):
        # Grow the attribute array once the count has been parsed.
        self.attributes.vsAddElements( self.attributes_count, AttributeInfo )
class JavaClass(vstruct.VStruct):
    """Top-level Java ClassFile structure.

    Fields are declared in on-disk parse order; the pcb_* parse callbacks
    grow the variable-length arrays once their count fields are known.
    """

    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.magic = v_uint32(bigend=True)
        # NOTE(review): "minor_versino" is a typo, but renaming the
        # attribute would change the public interface — flagged only.
        self.minor_versino = v_uint16(bigend=True)
        self.major_version = v_uint16(bigend=True)
        self.const_pool_cnt = v_uint16(bigend=True)
        self.const_pool = vstruct.VArray()
        self.access_flags = v_uint16(bigend=True)
        self.this_class = v_uint16(bigend=True)
        self.super_class = v_uint16(bigend=True)
        self.interface_cnt = v_uint16(bigend=True)
        self.interfaces = vstruct.VArray()
        self.fields_cnt = v_uint16(bigend=True)
        self.fields = vstruct.VArray()
        self.methods_cnt = v_uint16(bigend=True)
        self.methods = vstruct.VArray()
        self.attributes_cnt = v_uint16(bigend=True)
        self.attributes = vstruct.VArray()

    def pcb_const_pool_cnt(self):
        # Count is off by one according to the spec
        self.const_pool.vsAddElements( self.const_pool_cnt - 1, ConstPoolInfo )

    def pcb_interface_cnt(self):
        # Interfaces are bare u16 constant-pool indexes, not structures.
        for i in xrange( self.interface_cnt ):
            self.interfaces.vsAddElement( v_uint16( bigend=True ) )

    def pcb_fields_cnt(self):
        self.fields.vsAddElements( self.fields_cnt, FieldInfo )

    def pcb_methods_cnt(self):
        self.methods.vsAddElements( self.methods_cnt, MethodInfo )

    def pcb_attributes_cnt(self):
        self.attributes.vsAddElements( self.attributes_cnt, AttributeInfo )

    def getClassName(self):
        # NOTE(review): unlike the accessors below, no "- 1" adjustment is
        # applied to this 1-based pool index, and per the class-file format
        # this_class references a Class entry (tag 7), not a UTF-8 string
        # directly — confirm against real class files before changing.
        return self.const_pool[ self.this_class ].data.tagval.strbytes

    def getSuperClassName(self):
        # NOTE(review): same indexing concern as getClassName().
        return self.const_pool[ self.super_class ].data.tagval.strbytes

    def getClassFields(self):
        '''
        Get the fields defined by this class as a tuple of
        ( fieldname, fieldtype, attribs ) where attribs is a dict.
        '''
        ret = []
        for fname,fieldinfo in self.fields:
            # Pool indexes are 1-based; the VArray is 0-based, hence "- 1".
            fieldname = self.const_pool[ fieldinfo.name_index - 1 ].data.tagval.strbytes
            descname = self.const_pool[ fieldinfo.descriptor_index - 1 ].data.tagval.strbytes
            attrs = {}
            for afield, attrinfo in fieldinfo.attributes:
                attrname = self.const_pool[ attrinfo.attribute_name_index - 1 ].data.tagval.strbytes
                attrs[ attrname ] = attrinfo.attribute
            ret.append( (fieldname, descname, attrs) )
        return ret

    def getClassMethods(self):
        '''Return a list of ( methodname, attrs ) tuples (attrs is a dict).'''
        ret = []
        for fname,methinfo in self.methods:
            methname = self.const_pool[ methinfo.name_index - 1 ].data.tagval.strbytes
            attrs = {}
            for afield, attrinfo in methinfo.attributes:
                attrname = self.const_pool[ attrinfo.attribute_name_index - 1 ].data.tagval.strbytes
                attrs[ attrname ] = attrinfo.attribute
            ret.append( (methname, attrs) )
        return ret

    def getClassAttributes(self):
        '''Return the class-level attributes as a name -> bytes dict.'''
        attrs = {}
        for afield, attrinfo in self.attributes:
            attrname = self.const_pool[ attrinfo.attribute_name_index - 1 ].data.tagval.strbytes
            attrs[ attrname ] = attrinfo.attribute
        return attrs
if __name__ == '__main__':
    import sys
    import traceback

    # Parse every class file named on the command line and dump its contents.
    for fname in sys.argv[1:]:
        # Fix: use a context manager so the file handle is closed promptly
        # (the original leaked it via open(...).read()).
        with open(fname, 'rb') as classfile:
            fbytes = classfile.read()

        c = JavaClass()
        try:
            c.vsParse( fbytes )
            print(c.tree())
            cname = c.getClassName()
            sname = c.getSuperClassName()
            print('Java Class: %s (inherits: %s)' % ( cname, sname ))
            for fname,descname,attrs in c.getClassFields():
                print('Field: %s (%s) (attrs: %r)' % ( fname, descname, attrs.keys()) )
            for methname,attrs in c.getClassMethods():
                print('Method: %s (attrs: %r)' % (methname, attrs.keys()))
            print('Constants:')
            for fname,const in c.const_pool:
                print(const.tag,const.data.tree())
            print(c.getClassAttributes().keys())
        except Exception as e:
            # Show the partial parse state to aid debugging, then the traceback.
            print(c.tree())
            traceback.print_exc()
|
|
# ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""
Galois/Counter Mode (GCM).
"""
__all__ = ['GcmMode']
from Crypto.Util.py3compat import b, bchr, byte_string, bord, unhexlify
from Crypto.Util.number import long_to_bytes, bytes_to_long
from Crypto.Hash import BLAKE2s
from Crypto.Random import get_random_bytes
from Crypto.Util._raw_api import (load_pycryptodome_raw_lib, VoidPointer,
create_string_buffer, get_raw_buffer,
SmartPointer, c_size_t, expect_byte_string)
_raw_galois_lib = load_pycryptodome_raw_lib("Crypto.Util._galois",
"""
int ghash( uint8_t y_out[16],
const uint8_t block_data[],
size_t len,
const uint8_t y_in[16],
const void *exp_key);
int ghash_expand(const uint8_t h[16],
void **ghash_tables);
int ghash_destroy(void *ghash_tables);
""")
class _GHASH(object):
    """GHASH function defined in NIST SP 800-38D, Algorithm 2.

    If X_1, X_2, .. X_m are the blocks of input data, the function
    computes:

       X_1*H^{m} + X_2*H^{m-1} + ... + X_m*H

    in the Galois field GF(2^128) using the reducing polynomial
    (x^128 + x^7 + x^2 + x + 1).
    """

    def __init__(self, subkey):
        # subkey is the 16-byte hash key H; the C library pre-expands it
        # into lookup tables for fast multiplication.
        assert len(subkey) == 16
        expect_byte_string(subkey)
        self._exp_key = VoidPointer()
        result = _raw_galois_lib.ghash_expand(subkey,
                                              self._exp_key.address_of())
        if result:
            raise ValueError("Error %d while expanding the GMAC key" % result)

        # SmartPointer frees the expanded tables when this object dies.
        self._exp_key = SmartPointer(self._exp_key.get(),
                                     _raw_galois_lib.ghash_destroy)

        # create_string_buffer always returns a string of zeroes
        self._last_y = create_string_buffer(16)

    def update(self, block_data):
        # Absorb one or more complete 16-byte blocks; the chaining value
        # lives in self._last_y (used as both input and output below).
        assert len(block_data) % 16 == 0
        expect_byte_string(block_data)

        result = _raw_galois_lib.ghash(self._last_y,
                                       block_data,
                                       c_size_t(len(block_data)),
                                       self._last_y,
                                       self._exp_key.get())
        if result:
            raise ValueError("Error %d while updating GMAC" % result)

        return self

    def digest(self):
        # Current chaining value = GHASH of everything absorbed so far.
        return get_raw_buffer(self._last_y)
def enum(**enums):
    """Build a lightweight enumeration: a throwaway class whose class
    attributes are exactly the given keyword arguments."""
    members = dict(enums)
    return type('Enum', (), members)

# Internal MAC FSM states: all associated data is hashed first, then
# the ciphertext.
MacStatus = enum(PROCESSING_AUTH_DATA=1, PROCESSING_CIPHERTEXT=2)
class GcmMode(object):
"""Galois Counter Mode (GCM).
This is an Authenticated Encryption with Associated Data (`AEAD`_) mode.
It provides both confidentiality and authenticity.
The header of the message may be left in the clear, if needed, and it will
still be subject to authentication. The decryption step tells the receiver
if the message comes from a source that really knowns the secret key.
Additionally, decryption detects if any part of the message - including the
header - has been modified or corrupted.
This mode requires a *nonce*.
This mode is only available for ciphers that operate on 128 bits blocks
(e.g. AES but not TDES).
See `NIST SP800-38D`_.
.. _`NIST SP800-38D`: http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
.. _AEAD: http://blog.cryptographyengineering.com/2012/05/how-to-choose-authenticated-encryption.html
:undocumented: __init__
"""
    def __init__(self, factory, key, nonce, mac_len, cipher_params):
        """Initialize the GCM state machine.

        :Parameters:
          factory : module
            The block cipher module; must operate on 128-bit blocks.
          key : byte string
            The secret key.
          nonce : byte string
            The unique IV (12 bytes is the standard fast path; other
            non-zero lengths are hashed with GHASH to derive J0).
          mac_len : int
            Length of the MAC tag, in bytes (4..16).
          cipher_params : dict
            Extra keyword parameters forwarded to the cipher factory.
        """

        self.block_size = factory.block_size
        if self.block_size != 16:
            raise ValueError("GCM mode is only available for ciphers"
                             " that operate on 128 bits blocks")

        if len(nonce) == 0:
            raise ValueError("Nonce cannot be empty")

        if not byte_string(nonce):
            raise TypeError("Nonce must be a byte string")

        self.nonce = nonce
        """Nonce"""

        self._factory = factory
        self._key = key
        self._tag = None  # Cache for MAC tag
        self._mac_len = mac_len
        if not (4 <= mac_len <= 16):
            raise ValueError("Parameter 'mac_len' must be in the range 4..16")

        # Allowed transitions after initialization
        self._next = [self.update, self.encrypt, self.decrypt,
                      self.digest, self.verify]

        self._no_more_assoc_data = False

        # Length of associated data
        self._auth_len = 0

        # Length of the ciphertext or plaintext
        self._msg_len = 0

        # Step 1 in SP800-38D, Algorithm 4 (encryption) - Compute H
        # See also Algorithm 5 (decryption)
        hash_subkey = factory.new(key,
                                  self._factory.MODE_ECB,
                                  **cipher_params
                                  ).encrypt(bchr(0) * 16)

        # Step 2 - Compute J0 (integer, not byte string!)
        if len(nonce) == 12:
            # 96-bit nonce fast path: J0 = nonce || 0^31 || 1
            self._j0 = bytes_to_long(nonce + b("\x00\x00\x00\x01"))
        else:
            # Otherwise J0 = GHASH(nonce zero-padded || 64-bit bit-length)
            fill = (16 - (len(nonce) % 16)) % 16 + 8
            ghash_in = (nonce +
                        bchr(0) * fill +
                        long_to_bytes(8 * len(nonce), 8))
            self._j0 = bytes_to_long(_GHASH(hash_subkey)
                                     .update(ghash_in)
                                     .digest())

        # Step 3 - Prepare GCTR cipher for encryption/decryption
        self._cipher = factory.new(key,
                                   self._factory.MODE_CTR,
                                   initial_value=self._j0 + 1,
                                   nonce=b(""),
                                   **cipher_params)

        # Step 5 - Bootstrap GHASH
        self._signer = _GHASH(hash_subkey)

        # Step 6 - Prepare GCTR cipher for GMAC (encrypts S with counter J0)
        self._tag_cipher = factory.new(key,
                                       self._factory.MODE_CTR,
                                       initial_value=self._j0,
                                       nonce=b(""),
                                       **cipher_params)

        # Cache for data to authenticate
        self._cache = b("")

        self._status = MacStatus.PROCESSING_AUTH_DATA
    def update(self, assoc_data):
        """Protect associated data

        If there is any associated data, the caller has to invoke
        this function one or more times, before using
        ``decrypt`` or ``encrypt``.

        By *associated data* it is meant any data (e.g. packet headers) that
        will not be encrypted and will be transmitted in the clear.
        However, the receiver is still able to detect any modification to it.
        In GCM, the *associated data* is also called
        *additional authenticated data* (AAD).

        If there is no associated data, this method must not be called.

        The caller may split associated data in segments of any size, and
        invoke this method multiple times, each time with the next segment.

        :Parameters:
          assoc_data : byte string
            A piece of associated data. There are no restrictions on its size.
        """

        if self.update not in self._next:
            raise TypeError("update() can only be called"
                            " immediately after initialization")

        # AAD may keep coming, or encryption/decryption may start now.
        self._next = [self.update, self.encrypt, self.decrypt,
                      self.digest, self.verify]

        self._update(assoc_data)
        self._auth_len += len(assoc_data)

        # Return self to allow chained calls, e.g. cipher.update(a).update(b)
        return self
    def _update(self, data):
        # Feed `data` to GHASH, buffering in self._cache so the signer only
        # ever receives whole 16-byte blocks.
        assert(len(self._cache) < 16)

        if len(self._cache) > 0:
            # Top up the pending partial block first.
            filler = min(16 - len(self._cache), len(data))
            self._cache += data[:filler]
            data = data[filler:]

            if len(self._cache) < 16:
                # Still short of a full block: wait for more data.
                return

            # The cache is exactly one block
            self._signer.update(self._cache)
            self._cache = b("")

        # Hash all complete blocks; keep the sub-block tail for later.
        update_len = len(data) // 16 * 16
        self._cache = data[update_len:]
        if update_len > 0:
            self._signer.update(data[:update_len])
    def _pad_cache_and_update(self):
        # Flush any buffered partial block to GHASH, padded with zero bytes.
        assert(len(self._cache) < 16)

        # The associated data A (step 5, section 7.1) and the ciphertext C
        # (step 6, section 7.2) are each zero-padded with the minimum
        # number of bytes (possibly none) needed to reach a 16-byte
        # block boundary before being hashed.
        len_cache = len(self._cache)
        if len_cache > 0:
            self._update(bchr(0) * (16 - len_cache))
    def encrypt(self, plaintext):
        """Encrypt data with the key and the parameters set at initialization.

        A cipher object is stateful: once you have encrypted a message
        you cannot encrypt (or decrypt) another message using the same
        object.

        The data to encrypt can be broken up in two or
        more pieces and `encrypt` can be called multiple times.

        That is, the statement:

            >>> c.encrypt(a) + c.encrypt(b)

        is equivalent to:

            >>> c.encrypt(a+b)

        This function does not add any padding to the plaintext.

        :Parameters:
          plaintext : byte string
            The piece of data to encrypt.
            It can be of any length.
        :Return:
            the encrypted data, as a byte string.
            It is as long as *plaintext*.
        """

        if self.encrypt not in self._next:
            raise TypeError("encrypt() can only be called after"
                            " initialization or an update()")
        # Once encryption has started, no more AAD and no decrypt/verify.
        self._next = [self.encrypt, self.digest]

        ciphertext = self._cipher.encrypt(plaintext)

        if self._status == MacStatus.PROCESSING_AUTH_DATA:
            # First ciphertext seen: close out the AAD section by padding
            # it to a block boundary, then start MACing ciphertext.
            self._pad_cache_and_update()
            self._status = MacStatus.PROCESSING_CIPHERTEXT

        # GCM authenticates the *ciphertext* (encrypt-then-MAC).
        self._update(ciphertext)
        self._msg_len += len(plaintext)

        return ciphertext
    def decrypt(self, ciphertext):
        """Decrypt data with the key and the parameters set at initialization.

        A cipher object is stateful: once you have decrypted a message
        you cannot decrypt (or encrypt) another message with the same
        object.

        The data to decrypt can be broken up in two or
        more pieces and `decrypt` can be called multiple times.

        That is, the statement:

            >>> c.decrypt(a) + c.decrypt(b)

        is equivalent to:

            >>> c.decrypt(a+b)

        This function does not remove any padding from the plaintext.

        :Parameters:
          ciphertext : byte string
            The piece of data to decrypt.
            It can be of any length.

        :Return: the decrypted data (byte string).
        """

        if self.decrypt not in self._next:
            raise TypeError("decrypt() can only be called"
                            " after initialization or an update()")
        # Once decryption has started, no more AAD and no encrypt/digest.
        self._next = [self.decrypt, self.verify]

        if self._status == MacStatus.PROCESSING_AUTH_DATA:
            # First ciphertext seen: close out the AAD section by padding
            # it to a block boundary, then start MACing ciphertext.
            self._pad_cache_and_update()
            self._status = MacStatus.PROCESSING_CIPHERTEXT

        # The MAC covers the received *ciphertext*, before decryption.
        self._update(ciphertext)
        self._msg_len += len(ciphertext)

        return self._cipher.decrypt(ciphertext)
    def digest(self):
        """Compute the *binary* MAC tag in an AEAD mode.

        The caller invokes this function at the very end.
        This method returns the MAC that shall be sent to the receiver,
        together with the ciphertext.

        :Return: the MAC, as a byte string.
        """
        # FSM guard: digest() is the sender's operation; it is not
        # available once decrypt()/verify() have been used.
        if self.digest not in self._next:
            raise TypeError("digest() cannot be called when decrypting"
                            " or validating a message")
        # From here on only digest() may be called again (idempotent,
        # thanks to the tag cache in _compute_mac()).
        self._next = [self.digest]
        return self._compute_mac()
    def _compute_mac(self):
        """Compute MAC without any FSM checks."""
        # The tag is cached so repeated digest()/verify() calls are
        # cheap and always return the same value.
        if self._tag:
            return self._tag
        # Step 5 in NIST SP 800-38D, Algorithm 4 - Compute S
        # Pad the last partial block, then append the bit lengths of the
        # associated data and of the message as 64-bit integers.
        self._pad_cache_and_update()
        self._update(long_to_bytes(8 * self._auth_len, 8))
        self._update(long_to_bytes(8 * self._msg_len, 8))
        s_tag = self._signer.digest()
        # Step 6 - Compute T
        # Encrypt S with the dedicated tag cipher and truncate to the
        # requested MAC length.
        self._tag = self._tag_cipher.encrypt(s_tag)[:self._mac_len]
        return self._tag
def hexdigest(self):
"""Compute the *printable* MAC tag.
This method is like `digest`.
:Return: the MAC, as a hexadecimal string.
"""
return "".join(["%02x" % bord(x) for x in self.digest()])
    def verify(self, received_mac_tag):
        """Validate the *binary* MAC tag.

        The caller invokes this function at the very end.
        This method checks if the decrypted message is indeed valid
        (that is, if the key is correct) and it has not been
        tampered with while in transit.

        :Parameters:
          received_mac_tag : byte string
            This is the *binary* MAC, as received from the sender.
        :Raises ValueError:
            if the MAC does not match. The message has been tampered with
            or the key is incorrect.
        """
        # FSM guard: verify() is the receiver's operation; it cannot
        # follow encrypt()/digest() on this object.
        if self.verify not in self._next:
            raise TypeError("verify() cannot be called"
                            " when encrypting a message")
        # From here on only verify() may be called again.
        self._next = [self.verify]
        # Compare the two tags indirectly through keyed BLAKE2s hashes
        # with a fresh random key: this avoids the timing side channel
        # a naive byte-by-byte comparison of the raw tags would create.
        secret = get_random_bytes(16)
        mac1 = BLAKE2s.new(digest_bits=160, key=secret,
                           data=self._compute_mac())
        mac2 = BLAKE2s.new(digest_bits=160, key=secret,
                           data=received_mac_tag)
        if mac1.digest() != mac2.digest():
            raise ValueError("MAC check failed")
def hexverify(self, hex_mac_tag):
"""Validate the *printable* MAC tag.
This method is like `verify`.
:Parameters:
hex_mac_tag : string
This is the *printable* MAC, as received from the sender.
:Raises ValueError:
if the MAC does not match. The message has been tampered with
or the key is incorrect.
"""
self.verify(unhexlify(hex_mac_tag))
def encrypt_and_digest(self, plaintext):
"""Perform encrypt() and digest() in one step.
:Parameters:
plaintext : byte string
The piece of data to encrypt.
:Return:
a tuple with two byte strings:
- the encrypted data
- the MAC
"""
return self.encrypt(plaintext), self.digest()
def decrypt_and_verify(self, ciphertext, received_mac_tag):
"""Perform decrypt() and verify() in one step.
:Parameters:
ciphertext : byte string
The piece of data to decrypt.
received_mac_tag : byte string
This is the *binary* MAC, as received from the sender.
:Return: the decrypted data (byte string).
:Raises ValueError:
if the MAC does not match. The message has been tampered with
or the key is incorrect.
"""
plaintext = self.decrypt(ciphertext)
self.verify(received_mac_tag)
return plaintext
def _create_gcm_cipher(factory, **kwargs):
    """Create a new block cipher, configured in Galois Counter Mode (GCM).

    :Parameters:
      factory : module
        A block cipher module, taken from `Crypto.Cipher`.
        The cipher must have block length of 16 bytes.
        GCM has been only defined for `Crypto.Cipher.AES`.

    :Keywords:
      key : byte string
        The secret key to use in the symmetric cipher.
        It must be 16 (e.g. *AES-128*), 24 (e.g. *AES-192*)
        or 32 (e.g. *AES-256*) bytes long.
      nonce : byte string
        A value that must never be reused for any other encryption.
        There are no restrictions on its length,
        but it is recommended to use at least 16 bytes.
        The nonce shall never repeat for two
        different messages encrypted with the same key,
        but it does not need to be random.
        If not provided, a 16 byte nonce will be randomly created.
      mac_len : integer
        Length of the MAC, in bytes.
        It must be no larger than 16 bytes (which is the default).
    """
    # The key is mandatory; report the missing keyword by name.
    try:
        cipher_key = kwargs.pop("key")
    except KeyError as err:
        raise TypeError("Missing parameter:" + str(err))

    # Generate a fresh random nonce when the caller supplies none.
    nonce_value = kwargs.pop("nonce", None)
    if nonce_value is None:
        nonce_value = get_random_bytes(16)

    tag_length = kwargs.pop("mac_len", 16)

    # Any remaining keyword arguments are forwarded untouched.
    return GcmMode(factory, cipher_key, nonce_value, tag_length, kwargs)
|
|
#!/usr/bin/env python27
# -*- coding: utf-8 -*-
import urllib2
import StringIO
import gzip
import pprint
import time
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import csv
import boto
import gcs_oauth2_boto_plugin
import httplib2
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
from my_config import *
def download_data(base_url, file_name, output_file_path):
    """Download a gzip-compressed file and write it out decompressed.

    :param base_url: URL prefix the file lives under.
    :param file_name: name of the .gz file appended to base_url.
    :param output_file_path: local path the decompressed payload is
        written to (existing content is overwritten).
    :return: True on success.
    """
    # Download into an in-memory buffer, then decompress from it; see
    # http://stackoverflow.com/questions/15352668/download-and-decompress-gzipped-file-in-memory
    response = urllib2.urlopen(base_url + file_name)
    compressed_file = StringIO.StringIO()
    compressed_file.write(response.read())
    # Rewind so GzipFile starts reading from the first byte.
    compressed_file.seek(0)
    decompressed_file = gzip.GzipFile(fileobj=compressed_file, mode='rb')
    with open(output_file_path, 'w') as outfile:
        outfile.write(decompressed_file.read())
    return True
def preproccess_data(covtype_data):
    """Prepare the raw covtype frame for Google Prediction.

    Drops the id column (label 0), moves the target column (label 54)
    to the front, and replaces the integer cover-type codes 1..7 with
    string labels.
    """
    # drop the id col
    covtype_data = covtype_data.drop(0, axis=1)

    # Google Prediction requires the target as the first column, so
    # rotate the last column (the cover type) to the front.
    column_order = covtype_data.columns.tolist()
    column_order = column_order[-1:] + column_order[:-1]
    covtype_data = covtype_data[column_order]

    # Map the integer class codes onto string labels.
    label_by_code = {
        1: 'one',
        2: 'two',
        3: 'three',
        4: 'four',
        5: 'five',
        6: 'six',
        7: 'seven',
    }
    covtype_data[54] = covtype_data[54].map(label_by_code)
    return covtype_data
def split_data(covtype_data, train_ratio=0.27, write_to_disk=True):
    """Randomly split the frame into a training set and a test set.

    :param covtype_data: preprocessed frame; the response is expected
        in the first column.
    :param train_ratio: approximate fraction of rows that go into the
        training set (each row is kept independently with this
        probability).
    :param write_to_disk: when True, write train.csv, test.csv and
        test_real_response.csv to the working directory.
    :return: (train_set, test_set, test_set_response) where test_set
        has the response column removed and test_set_response holds it.
    """
    # BUG FIX: the mask previously compared against the hard-coded
    # literal 0.27, silently ignoring the train_ratio argument.
    msk = np.random.rand(len(covtype_data)) < train_ratio
    train_set = covtype_data[msk]
    test_set = covtype_data[~msk]

    # Split the response (first column) away from the test predictors.
    test_set_response = pd.DataFrame(test_set.iloc[:, 0].copy())
    test_set = test_set.iloc[:, 1:].copy()

    if write_to_disk:
        # Google Prediction wants the training labels quoted.
        train_set.to_csv('train.csv', header=False, index=False,
                         quoting=csv.QUOTE_NONNUMERIC)
        test_set.to_csv('test.csv', header=False, index=False)  # not quoting
        test_set_response.to_csv('test_real_response.csv',
                                 header=False, index=False)
    return (train_set, test_set, test_set_response)
def create_buckets(names_list, project_id):
    """Create the named buckets on Google Cloud Storage.

    :param names_list: bucket names to create.
    :param project_id: Google Cloud project that owns the buckets.
    :return: True (failures are only printed, never raised).
    """
    # https://cloud.google.com/storage/docs/gspythonlibrary
    for name in names_list:
        uri = boto.storage_uri(name, GOOGLE_STORAGE)
        try:
            # The owning project must be named in a request header.
            header_values = {"x-goog-project-id": project_id}
            uri.create_bucket(headers=header_values)
            print 'Successfully created bucket "%s"' % name
        except boto.exception.StorageCreateError, e:
            # NOTE(review): errors (e.g. bucket already exists) are
            # swallowed after printing -- the caller cannot tell.
            print 'Failed to create bucket:', e
    return True
def upload_files_to_bucket(file_names_list, bucket_name):
    """Upload local files into a Google Cloud Storage bucket.

    :param file_names_list: local file names; each is stored under the
        same name inside the bucket.
    :param bucket_name: destination bucket.
    :return: True on success.
    """
    # https://cloud.google.com/storage/docs/gspythonlibrary
    for filename in file_names_list:
        dst_uri = boto.storage_uri(
            bucket_name + '/' + filename, GOOGLE_STORAGE)
        dst_uri.new_key().set_contents_from_filename(filename)
        print 'Successfully created "%s/%s"' % (
            dst_uri.bucket_name, dst_uri.object_name)
    return True
def build_prediction_service(client_id, client_secret,
                             scope='https://www.googleapis.com/auth/prediction',
                             credential_file='credentials.json',
                             api='prediction', api_version='v1.6'):
    """Build an authorized Google Prediction API service object.

    Runs the OAuth2 web-server flow when no valid cached credentials
    exist, and stores them in *credential_file* for later runs.

    :param client_id: OAuth2 client id of the application.
    :param client_secret: OAuth2 client secret.
    :param scope: space-separated OAuth2 scopes to request.
    :param credential_file: where cached credentials are stored.
    :param api: Google API name to build.
    :param api_version: version of that API.
    :return: an authorized apiclient service object.
    """
    # https://developers.google.com/api-client-library/python/start/get_started
    # https://code.google.com/p/google-api-python-client/source/browse/samples/prediction/prediction.py
    flow = OAuth2WebServerFlow(client_id, client_secret, scope)
    storage = Storage(credential_file)
    credentials = storage.get()
    # Re-run the interactive flow only when nothing usable is cached.
    if credentials is None or credentials.invalid:
        credentials = run(flow, storage)
    http = httplib2.Http()
    http = credentials.authorize(http)
    service = build(api, api_version, http=http)
    return service
def build_prediction_model(service, project_number, model_id, storage_data_location):
    """Train a Google Prediction model and return its analysis.

    Starts a training job for the CSV stored at
    *storage_data_location*, polls until it leaves the RUNNING state,
    then fetches the model analysis.

    :param service: authorized prediction service object.
    :param project_number: Google Cloud project number.
    :param model_id: identifier to give the trained model.
    :param storage_data_location: 'bucket/object' path of the train CSV.
    :return: the result of trainedmodels().analyze().
    :raises Exception: when training reports a state other than DONE or
        RUNNING.
    """
    # https://code.google.com/p/google-api-python-client/source/browse/samples/prediction/prediction.py
    papi = service.trainedmodels()
    print 'start training'
    body = {
        'id': model_id,
        'storageDataLocation': storage_data_location,
    }
    start = papi.insert(project=project_number, body=body).execute()
    print 'Waiting for training to complete'
    # Poll the training status until completion or failure.
    while True:
        status = papi.get(project=project_number, id=model_id).execute()
        state = status['trainingStatus']
        print 'Training state: ' + state
        if state == 'DONE':
            break
        elif state == 'RUNNING':
            time.sleep(SLEEP_TIME)
            continue
        else:
            raise Exception('Training Error: ' + state)
        # NOTE(review): every branch above breaks, continues or raises,
        # so the lines below are unreachable as written -- they were
        # probably meant to run on the DONE branch.  TODO confirm.
        # Job has completed.
        print 'Training completed:'
        pprint.pprint(status)
        break
    print 'getting analyzed result'
    model_result = papi.analyze(project=project_number, id=model_id).execute()
    pprint.pprint(model_result)
    return model_result
def make_prediction(service, project_number, model_id, test_set, quota=100):
    """Predict labels for the first *quota* rows of *test_set*.

    The API accepts only one csvInstance per predict() call and has a
    limited request quota, so rows are submitted one at a time.

    :param service: authorized prediction service object.
    :param project_number: Google Cloud project number.
    :param model_id: identifier of the trained model.
    :param test_set: frame of predictor columns (no response column).
    :param quota: maximum number of rows to predict.
    :return: list of predicted output labels, in row order.
    """
    # Google Prediction do not allow multiple csvInstance in predict()
    # Google has limitted quota!!!!!!!!!!!
    nrow_test_set = test_set.shape[0]
    quota = int(quota)
    # Never ask for more rows than the test set actually has.
    if quota > nrow_test_set:
        quota = nrow_test_set
    prediction_list = []
    papi = service.trainedmodels()
    for i in range(0, quota):
        print 'predicting '+str(i)+'-th data row'
        # The API expects one CSV row as a plain list of numbers.
        ith_row_list = map(int, test_set.iloc[i].tolist())
        body = {
            'input': {
                'csvInstance': ith_row_list
            }
        }
        response = papi.predict(project=project_number, id=model_id, body=body).execute()
        p = response['outputLabel']
        prediction_list.append(p)
    return prediction_list
def get_accuracy(true_values, predicted_values, print_it=True):
    """Return the classification accuracy, optionally printing it."""
    score = accuracy_score(true_values, predicted_values)
    if print_it:
        print(score)
    return score
def sklearn_prediction(train_set, test_set):
    """Local random-forest baseline for the Google Prediction results.

    :param train_set: frame whose first column is the response and
        whose remaining columns are predictors.
    :param test_set: frame of predictor columns only.
    :return: list of predicted labels for test_set, in row order.
    """
    forest = RandomForestClassifier(n_estimators=100)
    forest = forest.fit(train_set.iloc[:, 1:], train_set.iloc[:, 0])
    # BUG FIX: this previously predicted on the undefined name
    # 'test_data' (NameError at runtime); the parameter is 'test_set'.
    prediction_list = forest.predict(test_set).tolist()
    return prediction_list
def main():
    """End-to-end demo: covtype data, Google Prediction vs sklearn.

    Downloads the UCI covtype data, preprocesses and splits it, uploads
    the training set to Google Cloud Storage, trains a Google
    Prediction model, and compares its accuracy on the first QUOTA test
    rows against a local random forest.
    """
    # set up environments (all constants come from my_config)
    client_id = CLIENT_ID
    client_secret = CLIENT_SECRET
    project_id = PROJ_ID
    project_number = PROJ_NUM
    model_id = MODEL_ID
    bucket_name = BUCKET_NAME
    quota = QUOTA
    gcs_oauth2_boto_plugin.SetFallbackClientIdAndSecret(client_id, client_secret)
    # download data
    download_data('https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/',
                  'covtype.data.gz', 'covtype.data')
    # read it and reproccess it
    covtype_data = pd.read_csv('covtype.data', header=None)
    covtype_data = preproccess_data(covtype_data)
    # split it (also writes train.csv used below)
    train_set, test_set, test_set_response = split_data(covtype_data)
    # create new bucket on Google Cloud Storage
    create_buckets([bucket_name], project_id)
    # upload the training set
    upload_files_to_bucket(['train.csv'], bucket_name)
    # register a prediction service (request every scope used below)
    service = build_prediction_service(client_id, client_secret,
                                       scope='https://www.googleapis.com/auth/prediction '+
                                       'https://www.googleapis.com/auth/devstorage.read_write '+
                                       'https://www.googleapis.com/auth/devstorage.full_control ' +
                                       'https://www.googleapis.com/auth/devstorage.read_only')
    # build the prediction online
    model_result = build_prediction_model(service, project_number, model_id, bucket_name+'/train.csv')
    # make the prediction based on the quota
    prediction_list = make_prediction(service, project_number, model_id, test_set, quota)
    # evaluate the accuracy on the same rows that were predicted
    true_list = test_set_response.iloc[0:quota, 0].tolist()
    print 'Google Prediction accuracy is:'
    get_accuracy(true_list, prediction_list, print_it=True)
    # for comparation
    skl_prediction_list = sklearn_prediction(train_set, test_set)[0:quota]
    print 'scikit-learn random forest accuracy is:'
    get_accuracy(true_list, skl_prediction_list, print_it=True)


if __name__ == '__main__':
    main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
High-level history functions. These wrap the basic his_read function to allow
some alternate representations of the historical data.
"""
import hszinc
import fysom
import pytz
from copy import deepcopy
from datetime import tzinfo
from six import string_types
from ...util import state
from ...util.asyncexc import AsynchronousException
try:
from pandas import Series, DataFrame
HAVE_PANDAS = True
except ImportError: # pragma: no cover
# Not covered, since we'll always have 'pandas' available during tests.
HAVE_PANDAS = False
def _resolve_tz(tz):
    """
    Normalise *tz* to a ``datetime.tzinfo`` instance (or ``None``).

    Accepts ``None`` or an existing ``tzinfo`` (returned unchanged), an
    Olson database name such as ``"Europe/Paris"`` (resolved through
    ``pytz``), or a Haystack timezone name (resolved through
    ``hszinc``).
    """
    if (tz is None) or isinstance(tz, tzinfo):
        # Already resolved (or deliberately absent).
        return tz
    if isinstance(tz, string_types):
        # Olson names contain a region separator, e.g. "America/New_York".
        if "/" in tz:
            return pytz.timezone(tz)
        return hszinc.zoneinfo.timezone(tz)
class HisReadSeriesOperation(state.HaystackOperation):
    """
    Read the series data from a 'point' entity and present it in a concise
    format.
    """

    FORMAT_LIST = "list"  # [(ts1, value1), (ts2, value2), ...]
    FORMAT_DICT = "dict"  # {ts1: value1, ts2: value2, ...}
    FORMAT_SERIES = "series"  # pandas.Series

    def __init__(self, session, point, rng, tz, series_format):
        """
        Read the series data and return it.

        :param session: Haystack HTTP session object.
        :param point: ID of historical 'point' object to read.
        :param rng: Range to read from 'point'
        :param tz: Timezone to translate timezones to.  May be None.
        :param series_format: What format to present the series in.
        """
        super(HisReadSeriesOperation, self).__init__()

        # Validate the requested output format up front.
        if series_format not in (
            self.FORMAT_LIST,
            self.FORMAT_DICT,
            self.FORMAT_SERIES,
        ):
            raise ValueError("Unrecognised series_format %s" % series_format)

        if (series_format == self.FORMAT_SERIES) and (not HAVE_PANDAS):
            raise NotImplementedError("pandas not available.")

        # A slice is rendered as a ZINC "start,stop" range string.
        if isinstance(rng, slice):
            rng = ",".join(
                [
                    hszinc.dump_scalar(p, mode=hszinc.MODE_ZINC)
                    for p in (rng.start, rng.stop)
                ]
            )

        self._session = session
        self._point = point
        self._range = rng
        self._tz = _resolve_tz(tz)
        self._series_format = series_format

        # Two-step state machine: read the history, then report done.
        self._state_machine = fysom.Fysom(
            initial="init",
            final="done",
            events=[
                # Event          Current State       New State
                ("go", "init", "read"),
                ("read_done", "read", "done"),
                ("exception", "*", "done"),
            ],
            callbacks={"onenterread": self._do_read, "onenterdone": self._do_done},
        )

    def go(self):
        # Kick off the read; all further work happens in callbacks.
        self._state_machine.go()

    def _do_read(self, event):
        """
        Request the data from the server.
        """
        self._session.his_read(
            point=self._point, rng=self._range, callback=self._on_read
        )

    def _on_read(self, operation, **kwargs):
        """
        Process the grid, format it into the requested format.
        """
        try:
            # See if the read succeeded.
            # NOTE(review): this bare attribute access has no effect as
            # written; it was probably meant to be operation.wait().
            # TODO confirm.
            operation.wait
            grid = operation.result

            # Timestamp converter: pass-through unless a target
            # timezone was requested.
            if self._tz is None:
                conv_ts = lambda ts: ts
            else:
                conv_ts = lambda ts: ts.astimezone(self._tz)

            # Convert grid to list of tuples
            data = [(conv_ts(row["ts"]), row["val"]) for row in grid]
            units = ""
            values = []

            if self._series_format == self.FORMAT_DICT:
                data = dict(data)
            elif self._series_format == self.FORMAT_SERIES:
                # Split into index and data.
                try:
                    (index, data) = zip(*data)
                    # Unwrap hszinc.Quantity values, remembering the
                    # first unit seen; plain floats are kept as-is.
                    if isinstance(data[0], hszinc.Quantity) or isinstance(
                        data[-1], hszinc.Quantity
                    ):
                        for each in data:
                            try:
                                values.append(each.value)
                                if units == "":
                                    units = each.unit
                            except AttributeError:
                                if isinstance(each, float):
                                    values.append(each)
                                continue
                    else:
                        values = data
                except ValueError:
                    # Unpacking an empty result set raises ValueError.
                    values = []
                    index = []
                    units = ""

            # ser = Series(data=data[0].value, index=index)
            # NOTE(review): the MetaSeries is built for every format,
            # but 'index' is only bound on the FORMAT_SERIES path --
            # other formats would fall into the bare except below.
            # TODO confirm the intended placement of these lines.
            meta_serie = MetaSeries(data=values, index=index)
            meta_serie.add_meta("units", units)
            meta_serie.add_meta("point", self._point)
            self._state_machine.read_done(result=meta_serie)
        except:  # Catch all exceptions to pass to caller.
            self._state_machine.exception(result=AsynchronousException())

    def _do_done(self, event):
        """
        Return the result from the state machine.
        """
        self._done(event.result)
class HisReadFrameOperation(state.HaystackOperation):
    """
    Read the series data from several 'point' entities and present them in a
    concise format.
    """

    FORMAT_LIST = "list"  # [{'ts': ts1, 'col1': val1, ...}, {...}, ...]
    FORMAT_DICT = "dict"  # {ts1: {'col1': val1, ...}, ts2: ...}
    FORMAT_FRAME = "frame"  # pandas.DataFrame

    def __init__(self, session, columns, rng, tz, frame_format):
        """
        Read the series data and return it.

        :param session: Haystack HTTP session object.
        :param columns: IDs of historical point objects to read.
        :param rng: Range to read from 'point'
        :param tz: Timezone to translate timezones to.  May be None.
        :param frame_format: What format to present the frame in.
        """
        super(HisReadFrameOperation, self).__init__()
        self._log = session._log.getChild("his_read_frame")

        if frame_format not in (self.FORMAT_LIST, self.FORMAT_DICT, self.FORMAT_FRAME):
            raise ValueError("Unrecognised frame_format %s" % frame_format)

        if (frame_format == self.FORMAT_FRAME) and (not HAVE_PANDAS):
            raise NotImplementedError("pandas not available.")

        # A slice is rendered as a ZINC "start,stop" range string.
        if isinstance(rng, slice):
            rng = ",".join(
                [
                    hszinc.dump_scalar(p, mode=hszinc.MODE_ZINC)
                    for p in (rng.start, rng.stop)
                ]
            )

        # Convert the columns to a list of (column name, point id) tuples.
        strip_ref = lambda r: r.name if isinstance(r, hszinc.Ref) else r

        if isinstance(columns, dict):
            # Ensure all are strings to references
            columns = [(str(c), strip_ref(r)) for c, r in columns.items()]
        else:
            # Translate to a dict:
            columns = [(strip_ref(c), c) for c in columns]

        self._session = session
        self._columns = columns
        self._range = hszinc.dump_scalar(rng, mode=hszinc.MODE_ZINC)
        self._tz = _resolve_tz(tz)
        self._frame_format = frame_format
        # {timestamp: {column: value}} accumulator for all reads.
        self._data_by_ts = {}
        # Column names still awaiting a response (single-read mode).
        self._todo = set([c[0] for c in columns])

        self._state_machine = fysom.Fysom(
            initial="init",
            final="done",
            events=[
                # Event          Current State       New State
                ("probe_multi", "init", "probing"),
                ("do_multi_read", "probing", "multi_read"),
                ("all_read_done", "multi_read", "postprocess"),
                ("do_single_read", "probing", "single_read"),
                ("all_read_done", "single_read", "postprocess"),
                ("process_done", "postprocess", "done"),
                ("exception", "*", "done"),
            ],
            callbacks={
                "onenterprobing": self._do_probe_multi,
                "onentermulti_read": self._do_multi_read,
                "onentersingle_read": self._do_single_read,
                "onenterpostprocess": self._do_postprocess,
                "onenterdone": self._do_done,
            },
        )

    def go(self):
        self._state_machine.probe_multi()

    def _do_probe_multi(self, event):
        self._log.debug("Probing for multi-his-read support")
        self._session.has_features(
            [self._session.FEATURE_HISREAD_MULTI], callback=self._on_probe_multi
        )

    def _on_probe_multi(self, operation, **kwargs):
        try:
            result = operation.result
        except:  # Catch all exceptions to pass to caller.
            self._state_machine.exception(result=AsynchronousException())
            return

        if result.get(self._session.FEATURE_HISREAD_MULTI):
            # Session object supports multi-his-read
            self._log.debug("Using multi-his-read support")
            self._state_machine.do_multi_read()
        else:
            # Emulate multi-his-read with separate reads per point.
            self._log.debug("No multi-his-read support, emulating")
            self._state_machine.do_single_read()

    def _get_ts_rec(self, ts):
        # Fetch (or lazily create) the record collecting values at *ts*.
        try:
            return self._data_by_ts[ts]
        except KeyError:
            rec = {}
            self._data_by_ts[ts] = rec
            return rec

    def _do_multi_read(self, event):
        """
        Request the data from the server as a single multi-read request.
        """
        self._session.multi_his_read(
            points=[c[1] for c in self._columns],
            rng=self._range,
            callback=self._on_multi_read,
        )

    def _on_multi_read(self, operation, **kwargs):
        """
        Handle the multi-valued grid.
        """
        try:
            grid = operation.result

            if self._tz is None:
                conv_ts = lambda ts: ts
            else:
                conv_ts = lambda ts: ts.astimezone(self._tz)

            for row in grid:
                ts = conv_ts(row["ts"])
                rec = self._get_ts_rec(ts)
                # Multi-read grids name their value columns v0, v1, ...
                # in the order the points were submitted.
                for (col_idx, (col, _)) in enumerate(self._columns):
                    val = row.get("v%d" % col_idx)
                    # For frames, leave missing cells absent so pandas
                    # renders NaN; other formats keep explicit None.
                    if (val is not None) or (self._frame_format != self.FORMAT_FRAME):
                        rec[col] = val

            self._state_machine.all_read_done()
        except:  # Catch all exceptions to pass to caller.
            self._log.debug("Hit exception", exc_info=1)
            self._state_machine.exception(result=AsynchronousException())

    def _do_single_read(self, event):
        """
        Request the data from the server as multiple single-read requests.
        """
        for col, point in self._columns:
            self._log.debug("Column %s point %s", col, point)
            # BUG FIX: bind 'col' as a default argument.  The previous
            # lambda closed over the loop variable, so any callback that
            # fired after the loop advanced saw the wrong (last) column.
            self._session.his_read(
                point,
                self._range,
                lambda operation, col=col, **kw: self._on_single_read(
                    operation, col=col
                ),
            )

    def _on_single_read(self, operation, col, **kwargs):
        """
        Handle the grid returned for a single column's point.
        """
        self._log.debug("Response back for column %s", col)
        try:
            grid = operation.result

            if self._tz is None:
                conv_ts = lambda ts: ts
            else:
                conv_ts = lambda ts: ts.astimezone(self._tz)

            self._log.debug("%d records for %s: %s", len(grid), col, grid)
            for row in grid:
                ts = conv_ts(row["ts"])
                # Adopt the first timezone seen when none was requested,
                # so subsequent columns are aligned to it.
                if self._tz is None:
                    self._tz = ts.tzinfo
                rec = self._get_ts_rec(ts)
                val = row.get("val")
                if (val is not None) or (self._frame_format != self.FORMAT_FRAME):
                    rec[col] = val

            self._todo.discard(col)
            self._log.debug("Still waiting for: %s", self._todo)
            if not self._todo:
                # No more to read
                self._state_machine.all_read_done()
        except:  # Catch all exceptions to pass to caller.
            self._log.debug("Hit exception", exc_info=1)
            self._state_machine.exception(result=AsynchronousException())

    def _do_postprocess(self, event):
        """
        Convert the dict-of-dicts to the desired frame format.
        """
        self._log.debug("Post-processing")
        try:
            if self._frame_format == self.FORMAT_LIST:

                def _merge_ts(item):
                    rec = item[1].copy()
                    rec["ts"] = item[0]
                    return rec

                data = list(map(_merge_ts, list(self._data_by_ts.items())))
            elif self._frame_format == self.FORMAT_FRAME:
                # Build from dict
                data = MetaDataFrame.from_dict(self._data_by_ts, orient="index")

                def convert_quantity(val):
                    """
                    If value is Quantity, convert to value
                    """
                    if isinstance(val, hszinc.Quantity):
                        return val.value
                    else:
                        return val

                def get_units(serie):
                    try:
                        # BUG FIX: use positional access.  The index
                        # holds timestamps, so label-based serie[0]
                        # could raise an uncaught KeyError instead of
                        # the IndexError handled below.
                        first_element = serie.dropna().iloc[0]
                    except IndexError:  # needed for empty results
                        return ""
                    if isinstance(first_element, hszinc.Quantity):
                        return first_element.unit
                    else:
                        return ""

                for name, serie in data.iteritems():
                    # Record the unit per column, then strip Quantity
                    # wrappers so the frame holds plain values.
                    data.add_meta(name, get_units(serie))
                    data[name] = data[name].apply(convert_quantity)
            else:
                data = self._data_by_ts

            self._state_machine.process_done(result=data)
        except:  # Catch all exceptions to pass to caller.
            self._log.debug("Hit exception", exc_info=1)
            self._state_machine.exception(result=AsynchronousException())

    def _do_done(self, event):
        """
        Return the result from the state machine.
        """
        self._done(event.result)
class HisWriteSeriesOperation(state.HaystackOperation):
    """
    Write the series data to a 'point' entity.
    """

    def __init__(self, session, point, series, tz):
        """
        Write the series data to the point.

        :param session: Haystack HTTP session object.
        :param point: ID of historical 'point' object to write.
        :param series: Series data to be written to the point.
        :param tz: If not None, a datetime.tzinfo instance for this write.
        """
        super(HisWriteSeriesOperation, self).__init__()

        # We've either been given an Entity instance or a string/reference
        # giving the name of an entity.
        if isinstance(point, string_types) or isinstance(point, hszinc.Ref):
            # We have the name of an entity, we'll need to fetch it.
            self._entity_id = point
            self._point = None
        else:
            # We have an entity.
            self._point = point
            self._entity_id = point.id

        self._session = session
        self._series = series
        self._tz = _resolve_tz(tz)

        # The timezone is resolved by walking up the entity hierarchy:
        # point -> equip -> site; the first 'tz' attribute found wins.
        self._state_machine = fysom.Fysom(
            initial="init",
            final="done",
            events=[
                # Event          Current State       New State
                ("have_tz", "init", "write"),
                ("have_point", "init", "get_point_tz"),
                ("need_point", "init", "get_point"),
                ("have_point", "get_point", "get_point_tz"),
                ("have_tz", "get_point_tz", "write"),
                ("need_equip", "get_point_tz", "get_equip"),
                ("have_equip", "get_equip", "get_equip_tz"),
                ("have_tz", "get_equip_tz", "write"),
                ("need_site", "get_equip_tz", "get_site"),
                ("have_site", "get_site", "get_site_tz"),
                ("have_tz", "get_site_tz", "write"),
                ("write_done", "write", "done"),
                ("exception", "*", "done"),
            ],
            callbacks={
                "onenterget_point": self._do_get_point,
                "onenterget_point_tz": self._do_get_point_tz,
                "onenterget_equip": self._do_get_equip,
                "onenterget_equip_tz": self._do_get_equip_tz,
                "onenterget_site": self._do_get_site,
                "onenterget_site_tz": self._do_get_site_tz,
                "onenterwrite": self._do_write,
                "onenterdone": self._do_done,
            },
        )

    def go(self):
        if self._tz is not None:  # Do we have a timezone?
            # We do!
            self._state_machine.have_tz()
        elif self._point is not None:  # Nope, do we have the point?
            # We do!
            self._state_machine.have_point()
        else:
            # We need to fetch the point to get its timezone.
            self._state_machine.need_point()

    def _do_get_point(self, event):
        """
        Retrieve the point entity.
        """
        self._session.get_entity(self._entity_id, single=True, callback=self._got_point)

    def _got_point(self, operation, **kwargs):
        """
        Process the return value from get_entity
        """
        try:
            self._point = operation.result
            self._state_machine.have_point()
        except:
            self._state_machine.exception(result=AsynchronousException())

    def _do_get_point_tz(self, event):
        """
        See if the point has a timezone?
        """
        if hasattr(self._point, "tz") and isinstance(self._point.tz, tzinfo):
            # We have our timezone.
            self._tz = self._point.tz
            self._state_machine.have_tz()
        else:
            # Nope, look at the equip then.
            self._state_machine.need_equip()

    def _do_get_equip(self, event):
        """
        Retrieve the equip entity.
        """
        self._point.get_equip(callback=self._got_equip)

    def _got_equip(self, operation, **kwargs):
        """
        Process the return value from get_entity
        """
        try:
            equip = operation.result
            self._state_machine.have_equip(equip=equip)
        except:
            self._state_machine.exception(result=AsynchronousException())

    def _do_get_equip_tz(self, event):
        """
        See if the equip has a timezone?
        """
        equip = event.equip
        if hasattr(equip, "tz") and isinstance(equip.tz, tzinfo):
            # We have our timezone.
            self._tz = equip.tz
            self._state_machine.have_tz()
        else:
            # Nope, look at the site then.
            self._state_machine.need_site()

    def _do_get_site(self, event):
        """
        Retrieve the site entity.
        """
        self._point.get_site(callback=self._got_site)

    def _got_site(self, operation, **kwargs):
        """
        Process the return value from get_entity
        """
        try:
            site = operation.result
            self._state_machine.have_site(site=site)
        except:
            self._state_machine.exception(result=AsynchronousException())

    def _do_get_site_tz(self, event):
        """
        See if the site has a timezone?
        """
        site = event.site
        if hasattr(site, "tz") and isinstance(site.tz, tzinfo):
            # We have our timezone.
            self._tz = site.tz
            self._state_machine.have_tz()
        else:
            try:
                # Nope, no idea then.  Raised inside try so the error is
                # reported through the operation like any other failure.
                raise ValueError(
                    "No timezone specified for operation, " "point, equip or site."
                )
            except:
                self._state_machine.exception(result=AsynchronousException())

    def _do_write(self, event):
        """
        Push the data to the server.
        """
        try:
            # Process the timestamp records into an appropriate format.
            if hasattr(self._series, "to_dict"):
                # pandas Series (or anything dict-convertible).
                records = self._series.to_dict()
            elif not isinstance(self._series, dict):
                # Iterable of (timestamp, value) pairs.
                records = dict(self._series)
            else:
                records = self._series

            if not bool(records):
                # No data, skip writing this series.
                self._state_machine.write_done(result=None)
                return

            # Time-shift the records.  pytz zones need localize() for
            # naive timestamps; plain tzinfo instances use replace().
            if hasattr(self._tz, "localize"):
                localise = (
                    lambda ts: self._tz.localize(ts)
                    if ts.tzinfo is None
                    else ts.astimezone(self._tz)
                )
            else:
                localise = (
                    lambda ts: ts.replace(tzinfo=self._tz)
                    if ts.tzinfo is None
                    else ts.astimezone(self._tz)
                )

            records = dict([(localise(ts), val) for ts, val in records.items()])

            # Write the data
            self._session.his_write(
                point=self._entity_id,
                timestamp_records=records,
                callback=self._on_write,
            )
        except:
            self._state_machine.exception(result=AsynchronousException())

    def _on_write(self, operation, **kwargs):
        """
        Handle the write error, if any.
        """
        try:
            # See if the write succeeded.
            grid = operation.result
            if not isinstance(grid, hszinc.Grid):
                raise TypeError("Unexpected result: %r" % grid)

            # Move to the done state.
            self._state_machine.write_done(result=None)
        except:  # Catch all exceptions to pass to caller.
            self._state_machine.exception(result=AsynchronousException())

    def _do_done(self, event):
        """
        Return the result from the state machine.
        """
        self._done(event.result)
class HisWriteFrameOperation(state.HaystackOperation):
"""
Write the series data to several 'point' entities.
"""
def __init__(self, session, columns, frame, tz):
"""
Write the series data.
:param session: Haystack HTTP session object.
:param columns: IDs of historical point objects to read.
:param frame: Range to read from 'point'
:param tz: Timezone to translate timezones to.
"""
super(HisWriteFrameOperation, self).__init__()
self._log = session._log.getChild("his_write_frame")
tz = _resolve_tz(tz)
if tz is None:
tz = pytz.utc
if hasattr(tz, "localize"):
localise = (
lambda ts: tz.localize(ts) if ts.tzinfo is None else ts.astimezone(tz)
)
else:
localise = (
lambda ts: ts.replace(tzinfo=tz)
if ts.tzinfo is None
else ts.astimezone(tz)
)
# Convert frame to list of records.
if HAVE_PANDAS:
# Convert Pandas frame to dict of dicts form.
if isinstance(frame, DataFrame):
self._log.debug("Convert from Pandas DataFrame")
raw_frame = frame.to_dict(orient="dict")
frame = {}
for col, col_data in raw_frame.items():
for ts, val in col_data.items():
try:
frame_rec = frame[ts]
except KeyError:
frame_rec = {}
frame[ts] = frame_rec
frame[col] = val
# Convert dict of dicts to records, de-referencing column names.
if isinstance(frame, dict):
if columns is None:
def _to_rec(item):
(ts, raw_record) = item
record = raw_record.copy()
record["ts"] = ts
return record
else:
def _to_rec(item):
(ts, raw_record) = item
record = {}
for col, val in raw_record.items():
entity = columns[col]
if hasattr(entity, "id"):
entity = entity.id
if isinstance(entity, hszinc.Ref):
entity = entity.name
record[entity] = val
record["ts"] = ts
return record
frame = list(map(_to_rec, list(frame.items())))
elif columns is not None:
# Columns are aliased. De-alias the column names.
frame = deepcopy(frame)
for row in frame:
ts = row.pop("ts")
raw = row.copy()
row.clear()
row["ts"] = ts
for column, point in columns.items():
try:
value = raw.pop(column)
except KeyError:
self._log.debug(
"At %s missing column %s (for %s): %s",
ts,
column,
point,
raw,
)
continue
row[session._obj_to_ref(point).name] = value
# Localise all timestamps, extract columns:
columns = set()
def _localise_rec(r):
r["ts"] = localise(r["ts"])
columns.update(set(r.keys()) - set(["ts"]))
return r
frame = list(map(_localise_rec, frame))
self._session = session
self._frame = frame
self._columns = columns
self._todo = columns.copy()
self._tz = _resolve_tz(tz)
self._state_machine = fysom.Fysom(
initial="init",
final="done",
events=[
# Event Current State New State
("probe_multi", "init", "probing"),
("no_data", "init", "done"),
("do_multi_write", "probing", "multi_write"),
("all_write_done", "multi_write", "done"),
("do_single_write", "probing", "single_write"),
("all_write_done", "single_write", "done"),
("exception", "*", "done"),
],
callbacks={
"onenterprobing": self._do_probe_multi,
"onentermulti_write": self._do_multi_write,
"onentersingle_write": self._do_single_write,
"onenterdone": self._do_done,
},
)
def go(self):
if not bool(self._columns):
self._log.debug("No data to write")
self._state_machine.no_data(result=None)
else:
self._state_machine.probe_multi()
def _do_probe_multi(self, event):
self._log.debug("Probing for multi-his-write support")
self._session.has_features(
[self._session.FEATURE_HISWRITE_MULTI], callback=self._on_probe_multi
)
def _on_probe_multi(self, operation, **kwargs):
try:
result = operation.result
except: # Catch all exceptions to pass to caller.
self._log.warning("Unable to probe multi-his-write support", exc_info=1)
self._state_machine.exception(result=AsynchronousException())
result = {}
return
self._log.debug("Got result: %s", result)
if result.get(self._session.FEATURE_HISWRITE_MULTI):
# Session object supports multi-his-write
self._log.debug("Using multi-his-write support")
self._state_machine.do_multi_write()
else:
# Emulate multi-his-write with separate
self._log.debug("No multi-his-write support, emulating")
self._state_machine.do_single_write()
def _do_multi_write(self, event):
"""
Request the data from the server as a single multi-read request.
"""
self._session.multi_his_write(self._frame, callback=self._on_multi_write)
def _on_multi_write(self, operation, **kwargs):
"""
Handle the multi-valued grid.
"""
try:
grid = operation.result
if not isinstance(grid, hszinc.Grid):
raise ValueError("Unexpected result %r" % grid)
self._state_machine.all_write_done(result=None)
except: # Catch all exceptions to pass to caller.
self._log.debug("Hit exception", exc_info=1)
self._state_machine.exception(result=AsynchronousException())
def _do_single_write(self, event):
"""
Submit the data in single write requests.
"""
for point in self._columns:
self._log.debug("Point %s", point)
# Extract a series for this column
series = dict(
[
(r["ts"], r[point])
for r in filter(lambda r: r.get(point) is not None, self._frame)
]
)
self._session.his_write_series(
point,
series,
callback=lambda operation, **kw: self._on_single_write(
operation, point=point
),
)
    def _on_single_write(self, operation, point, **kwargs):
        """
        Handle the response to a single-point his-write request.
        """
        self._log.debug("Response back for point %s", point)
        try:
            res = operation.result
            # A successful single-point write is expected to return nothing.
            if res is not None:
                raise ValueError("Unexpected result %r" % res)
            # Mark this point as complete; once the outstanding set is empty
            # the whole write operation is finished.
            self._todo.discard(point)
            self._log.debug("Still waiting for: %s", self._todo)
            if not self._todo:
                # No more to write
                self._state_machine.all_write_done(result=None)
        except:  # Catch all exceptions to pass to caller.
            self._log.debug("Hit exception", exc_info=1)
            self._state_machine.exception(result=AsynchronousException())
    def _do_done(self, event):
        """
        Deliver the final result to the caller's completion callback:
        None on success, or an AsynchronousException wrapper on failure
        (see the 'exception' transitions above).
        """
        self._done(event.result)
if HAVE_PANDAS:
    class MetaSeries(Series):
        """
        Custom Pandas Serie with meta data
        """
        # NOTE(review): class-level dict — every MetaSeries instance shares
        # this one mapping, so add_meta mutates it globally.  Confirm whether
        # per-instance metadata was intended.
        meta = {}
        @property
        def _constructor(self):
            # Make pandas operations that construct new objects return
            # MetaSeries instead of plain Series.
            return MetaSeries
        def add_meta(self, key, value):
            self.meta[key] = value
    class MetaDataFrame(DataFrame):
        """
        Custom Pandas Dataframe with meta data
        Made from MetaSeries
        """
        # NOTE(review): shared class-level dict; same caveat as MetaSeries.
        meta = {}
        def __init__(self, *args, **kw):
            super(MetaDataFrame, self).__init__(*args, **kw)
        @property
        def _constructor(self):
            # New frames produced by pandas operations keep this subclass.
            return MetaDataFrame
        # Slicing/column access yields MetaSeries rather than plain Series.
        _constructor_sliced = MetaSeries
        def add_meta(self, key, value):
            self.meta[key] = value
|
|
"""
-*- coding: utf-8 -*- {{{
vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
Copyright (c) 2017, Battelle Memorial Institute
All rights reserved.
1. Battelle Memorial Institute (hereinafter Battelle) hereby grants
permission to any person or entity lawfully obtaining a copy of this
software and associated documentation files (hereinafter "the Software")
to redistribute and use the Software in source and binary forms, with or
without modification. Such person or entity may use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and
may permit others to do so, subject to the following conditions:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimers.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Other than as used herein, neither the name Battelle Memorial Institute
or Battelle may be used in any form whatsoever without the express
written consent of Battelle.
2. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an agency of the
United States Government. Neither the United States Government nor the United
States Department of Energy, nor Battelle, nor any of their employees, nor any
jurisdiction or organization that has cooperated in the development of these
materials, makes any warranty, express or implied, or assumes any legal
liability or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed, or
represents that its use would not infringe privately owned rights.
Reference herein to any specific commercial product, process, or service by
trade name, trademark, manufacturer, or otherwise does not necessarily
constitute or imply its endorsement, recommendation, or favoring by the
United States Government or any agency thereof, or Battelle Memorial Institute.
The views and opinions of authors expressed herein do not necessarily state or
reflect those of the United States Government or any agency thereof.
PACIFIC NORTHWEST NATIONAL LABORATORY
operated by
BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
under Contract DE-AC05-76RL01830
}}}
"""
import re
import abc
from sympy import symbols
from sympy.core import numbers
from sympy.parsing.sympy_parser import parse_expr
from collections import deque
import logging
from datetime import timedelta as td
from volttron.platform.agent.utils import setup_logging
from ilc.ilc_matrices import (extract_criteria, calc_column_sums, normalize_matrix,
validate_input, build_score, input_matrix)
setup_logging()
_log = logging.getLogger(__name__)
# Maps an "operation_type" configuration string to the criterion class that
# implements it; populated by the register_criterion decorator below.
criterion_registry = {}
def register_criterion(name):
    """Return a class decorator that files the decorated criterion class in
    ``criterion_registry`` under *name* (the ``operation_type`` value used in
    configuration files)."""
    def _register(criterion_cls):
        criterion_registry[name] = criterion_cls
        return criterion_cls
    return _register
def parse_sympy(data, condition=False):
    """
    Normalize configuration snippets for sympy by stripping spaces.

    :param data: a string, a list of strings, or a dict with string keys.
    :param condition: when True and *data* is a list, build a single boolean
        expression string, parenthesizing every operand but leaving the
        "&" / "|" connectives bare.
    :return: same shape as *data* with spaces removed (dict values untouched).
    """
    # Fix: ``rep`` was a mutable default argument and was iterated with the
    # Python-2-only ``dict.iteritems``; ``items()`` behaves identically here.
    def clean_text(text, rep=None):
        rep = {" ": ""} if rep is None else rep
        rep = dict((re.escape(k), v) for k, v in rep.items())
        pattern = re.compile("|".join(rep.keys()))
        new_key = pattern.sub(lambda m: rep[re.escape(m.group(0))], text)
        return new_key
    if isinstance(data, dict):
        # Clean only the keys; values pass through unchanged.
        return_data = {}
        for key, value in data.items():
            new_key = clean_text(key)
            return_data[new_key] = value
    elif isinstance(data, list):
        if condition:
            # Assemble a condition string: operands are parenthesized,
            # logical connectives are appended as-is.
            return_data = ""
            for item in data:
                parsed_string = clean_text(item)
                parsed_string = "(" + clean_text(item) + ")" if parsed_string not in ("&", "|") else parsed_string
                return_data += parsed_string
        else:
            return_data = []
            for item in data:
                return_data.append(clean_text(item))
    else:
        return_data = clean_text(data)
    #_log.debug("Parsing: {} to {}".format(data, return_data))
    return return_data
class CriteriaCluster(object):
    """
    A group of devices that are scored together using shared criteria
    labels, priority and row-average weights.
    """
    def __init__(self, priority, criteria_labels, row_average, cluster_config):
        self.criteria = {}
        self.priority = priority
        self.criteria_labels = criteria_labels
        self.row_average = row_average
        # The "mappers" tables are popped out of the cluster configuration and
        # published as a module-level global so that MapperCriterion instances
        # (constructed below via DeviceCriteria) can resolve their lookups.
        global mappers
        try:
            mappers = cluster_config.pop("mappers")
        except KeyError:
            mappers = {}
        for device_name, device_criteria in cluster_config.items():
            self.criteria[device_name] = DeviceCriteria(device_criteria)
    def get_all_evaluations(self):
        # Collect {(device_name, device_id): {criterion name: score}} for
        # every device/token in the cluster.
        results = {}
        for name, device in self.criteria.items():
            for device_id in device.criteria.keys():
                evaluations = device.evaluate(device_id)
                results[name, device_id] = evaluations
        return results
class CriteriaContainer(object):
    """Aggregates CriteriaCluster objects and produces a global ranking of
    devices across all clusters."""

    def __init__(self):
        self.clusters = []
        self.devices = {}

    def add_criteria_cluster(self, cluster):
        """Track *cluster* and index its devices by name for get_device()."""
        self.clusters.append(cluster)
        self.devices.update(cluster.criteria)

    def get_score_order(self):
        """Score every cluster's devices and return device names, best first."""
        scored = []
        for cluster in self.clusters:
            evaluations = cluster.get_all_evaluations()
            _log.debug('Device Evaluations: ' + str(evaluations))
            if not evaluations:
                continue
            input_arr = input_matrix(evaluations, cluster.criteria_labels)
            scores = build_score(input_arr, cluster.row_average, cluster.priority)
            scored.extend(scores)
            _log.debug('Input Array: ' + str(input_arr))
            _log.debug('Scored devices: ' + str(scores))
        # Scores are (score, device) tuples; highest score first.
        scored.sort(reverse=True)
        return [item[1] for item in scored]

    def get_device(self, device_name):
        return self.devices[device_name]
class DeviceCriteria(object):
    """Per-device wrapper: keeps one Criteria collection per device/token id
    and fans data and status updates out to all of them."""

    def __init__(self, criteria_config):
        self.criteria = {}
        self.points = {}
        self.expressions = {}
        self.condition = {}
        for device_id, device_criteria in criteria_config.items():
            self.criteria[device_id] = Criteria(device_criteria)

    def ingest_data(self, time_stamp, data):
        """Forward new device data to every token's criteria."""
        for criteria in self.criteria.values():
            criteria.ingest_data(time_stamp, data)

    def criteria_status(self, token, status):
        """Propagate curtailment status to one token's criteria."""
        self.criteria[token].criteria_status(status)

    def evaluate(self, token):
        """Return {criterion name: score} for one token."""
        return self.criteria[token].evaluate()
class Criteria(object):
    """A named collection of criterion instances built from configuration."""

    def __init__(self, criteria):
        self.criteria = {}
        for name, criterion in criteria.items():
            self.add(name, criterion)

    def add(self, name, criterion):
        """Instantiate and store the class registered for the criterion's
        'operation_type' key; remaining keys become constructor kwargs."""
        _log.debug("Criteria: {}".format(criterion))
        operation_type = criterion.pop('operation_type')
        self.criteria[name] = criterion_registry[operation_type](**criterion)

    def evaluate(self):
        """Return {criterion name: numeric score} for all criteria."""
        return dict(
            (name, criterion.evaluate_criterion())
            for name, criterion in self.criteria.items()
        )

    def ingest_data(self, time_stamp, data):
        for criterion in self.criteria.values():
            criterion.ingest_data(time_stamp, data)

    def criteria_status(self, status):
        for criterion in self.criteria.values():
            criterion.criteria_status(status)
class BaseCriterion(object):
    """
    Abstract base for all criteria: subclasses implement evaluate();
    evaluate_criterion() then coerces the result to a number and clamps it
    to the configured [minimum, maximum] range.
    """
    __metaclass__ = abc.ABCMeta
    def __init__(self, minimum=None, maximum=None):
        # Clamping helpers; identity functions when the bound is unset.
        self.min_func = (lambda x: x) if minimum is None else (lambda x: max(x, minimum))
        self.max_func = (lambda x: x) if maximum is None else (lambda x: min(x, maximum))
        self.minimum = minimum
        self.maximum = maximum
    def numeric_check(self, value):
        """
        Ensure the value returned by a criteria is a numeric type. If the value of a criteria is non-numeric the value
        will be converted if possible. If it is not the fall-back will be to return zero.
        :param value:
        :return:
        """
        # NOTE: ``long`` makes this Python-2-only; numbers.Float/Integer are
        # sympy number types (see module imports) produced by
        # FormulaCriterion expressions.
        if not isinstance(value, (int, float, long, numbers.Float, numbers.Integer)):
            if isinstance(value, str):
                try:
                    value = float(value)
                except ValueError:
                    value = 0.0
            elif isinstance(value, complex):
                # Keep only the real component of a complex result.
                value = value.real
            else:
                value = 0.0
        return value
    def evaluate_bounds(self, value):
        """
        If the value of the evaluated criteria is less than the minimum or greater than the maximum configured value for
        the criteria return the minimum or maximum value respectively.
        :param value:
        :return:
        """
        value = self.min_func(value)
        value = self.max_func(value)
        return value
    def evaluate_criterion(self):
        # Template method: raw evaluation, then numeric coercion, then clamp.
        value = self.evaluate()
        value = self.numeric_check(value)
        value = self.evaluate_bounds(value)
        return value
    @abc.abstractmethod
    def evaluate(self):
        pass
    def ingest_data(self, time_stamp, data):
        # Optional hook: subclasses override to consume new device data.
        pass
    def criteria_status(self, status):
        # Optional hook: subclasses override to react to curtailment status.
        pass
@register_criterion('status')
class StatusCriterion(BaseCriterion):
    """Score ``on_value`` while a boolean status point is set, otherwise
    ``off_value``."""

    def __init__(self, on_value=None, off_value=0.0, point_name=None, **kwargs):
        super(StatusCriterion, self).__init__(**kwargs)
        if on_value is None or point_name is None:
            raise ValueError('Missing parameter')
        self.on_value = on_value
        self.off_value = off_value
        self.point_name = point_name
        self.current_status = False

    def evaluate(self):
        # Pick the configured value for the most recently ingested status.
        return self.on_value if self.current_status else self.off_value

    def ingest_data(self, time_stamp, data):
        self.current_status = bool(data[self.point_name])
@register_criterion('constant')
class ConstantCriterion(BaseCriterion):
    """Always evaluate to a fixed configured value.

    ``off_value`` and ``point_name`` are accepted for configuration
    compatibility but are never read.
    """

    def __init__(self, value=None, off_value=0.0, point_name=None, **kwargs):
        super(ConstantCriterion, self).__init__(**kwargs)
        if value is None:
            raise ValueError('Missing parameter')
        self.value = value

    def evaluate(self):
        return self.value
@register_criterion('formula')
class FormulaCriterion(BaseCriterion):
    """Evaluate a sympy expression over configured data points.

    Each operation argument maps a point name to a parameter string; points
    flagged "nc" keep their pre-curtailment value while the device is
    curtailed.
    """

    def __init__(self, operation=None, operation_args=None, **kwargs):
        super(FormulaCriterion, self).__init__(**kwargs)
        if operation is None or operation_args is None:
            raise ValueError('Missing parameter')
        # backward compatibility with older configuration files
        if isinstance(operation_args, list):
            operation_args = {arg: "always" for arg in operation_args}
        # Materialize as lists so keys/values stay paired (and Py3-safe).
        operation_points = list(operation_args.keys())
        self.operation_parms = list(operation_args.values())
        # Fix: replaced a leftover Python-2 debug ``print`` statement with
        # proper logging.
        _log.debug("Formula points: {} -- parameters: {}".format(
            operation_points, self.operation_parms))
        self.operation_args = parse_sympy(operation_points)
        self.points = symbols(self.operation_args)
        self.expr = parse_expr(parse_sympy(operation))
        self.point_list = []
        self.status = False

    def evaluate(self):
        # Substitute the latest point values into the expression; before any
        # data has been ingested, fall back to the configured minimum.
        if self.point_list:
            value = self.expr.subs(self.point_list)
        else:
            value = self.minimum
        return value

    def ingest_data(self, time_stamp, data):
        point_list = []
        for point, parm in zip(self.operation_args, self.operation_parms):
            if parm.lower() == "nc" and self.status:
                # Device is curtailed: reuse the previously stored value for
                # this "nc" point instead of the live reading.
                point_list.append([item for item in self.point_list if item[0] == point].pop())
                _log.debug("device is curtailed use old value: {} -- {} -- {}".format(point, data[point], point_list))
            else:
                _log.debug("device is normal use current value: {} - {}".format(point, data[point]))
                point_list.append((point, data[point]))
        self.point_list = point_list

    def criteria_status(self, status):
        self.status = status
@register_criterion('mapper')
class MapperCriterion(BaseCriterion):
    """
    Constant score looked up from the module-global ``mappers`` tables, which
    CriteriaCluster populates from the cluster configuration's "mappers" key
    before device criteria are constructed.
    """
    def __init__(self, dict_name=None, map_key=None, **kwargs):
        super(MapperCriterion, self).__init__(**kwargs)
        if dict_name is None or map_key is None:
            raise ValueError('Missing parameter')
        # Resolved once at construction time; raises KeyError if the mapper
        # table or key is absent from the configuration.
        self.value = mappers[dict_name][map_key]
    def evaluate(self):
        return self.value
@register_criterion('history')
class HistoryCriterion(BaseCriterion):
    """
    Score based on how much a point has changed relative to its value
    ``previous_time`` minutes ago, linearly interpolated from retained
    history.
    """
    def __init__(self, comparison_type=None, point_name=None, previous_time=None, **kwargs):
        super(HistoryCriterion, self).__init__(**kwargs)
        if comparison_type is None or point_name is None or previous_time is None:
            raise ValueError('Missing parameter')
        # Samples are pushed with appendleft(), so the oldest sample sits at
        # the right end of the deque.
        self.history = deque()
        self.comparison_type = comparison_type
        self.point_name = point_name
        self.previous_time_delta = td(minutes=previous_time)
        self.current_value = None
        self.history_time = None
    def linear_interpolation(self, date1, value1, date2, value2, target_date):
        # Linear interpolation of the value at target_date between the two
        # bracketing samples.
        end_delta_t = (date2 - date1).total_seconds()
        target_delta_t = (target_date - date1).total_seconds()
        return (value2 - value1) * (target_delta_t / end_delta_t) + value1
    def evaluate(self):
        if self.current_value is None:
            # No data ingested yet.
            return self.minimum
        pre_timestamp, pre_value = self.history.pop()
        if pre_timestamp > self.history_time:
            # Even the oldest sample is newer than the target time; restore
            # it and report the floor value.
            self.history.append((pre_timestamp, pre_value))
            return self.minimum
        post_timestamp, post_value = self.history.pop()
        # Walk forward, discarding samples older than the target time, until
        # pre/post bracket history_time.  Discarded samples trim the history.
        while post_timestamp < self.history_time:
            pre_value, pre_timestamp = post_value, post_timestamp
            post_timestamp, post_value = self.history.pop()
        self.history.append((post_timestamp, post_value))
        prev_value = self.linear_interpolation(pre_timestamp, pre_value, post_timestamp, post_value, self.history_time)
        # NOTE(review): if comparison_type is neither 'direct' nor 'inverse',
        # 'value' is unbound here (NameError); 'inverse' can also divide by
        # zero when the value is unchanged — confirm inputs are validated
        # upstream.
        if self.comparison_type == 'direct':
            value = abs(prev_value - self.current_value)
        elif self.comparison_type == 'inverse':
            value = 1 / abs(prev_value - self.current_value)
        return value
    def ingest_data(self, time_stamp, data):
        # Target time for the historical comparison, relative to this sample.
        self.history_time = time_stamp - self.previous_time_delta
        self.current_value = data[self.point_name]
        self.history.appendleft((time_stamp, self.current_value))
|
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
JahnTeller distortion analysis.
"""
import os
import warnings
from typing import Any, Dict, Literal, Optional, Tuple, Union, cast
import numpy as np
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.analysis.local_env import (
LocalStructOrderParams,
get_neighbors_of_site_with_index,
)
from pymatgen.core.periodic_table import Species, get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
class JahnTellerAnalyzer:
    """
    Will attempt to classify if structure *may* be Jahn-Teller active.
    Class currently uses datafile of hard-coded common Jahn-Teller
    active ions.
    If structure is annotated with magnetic moments, will estimate
    if structure may be high-spin or low-spin.
    Class aims for more false-positives than false-negatives.
    """

    def __init__(self):
        """
        Init for JahnTellerAnalyzer.
        """
        # Known d-electron orbital occupations, keyed by motif then by number
        # of d electrons; "default" names the spin state assumed when none is
        # supplied or estimated.
        self.spin_configs = {
            "oct": {  # key is number of d electrons
                0: {"high": {"e_g": 0, "t_2g": 0}, "default": "high"},
                1: {"high": {"e_g": 0, "t_2g": 1}, "default": "high"},  # weak J-T
                2: {"high": {"e_g": 0, "t_2g": 2}, "default": "high"},  # weak
                3: {"high": {"e_g": 0, "t_2g": 3}, "default": "high"},  # no J-T
                4: {
                    "high": {"e_g": 1, "t_2g": 3},
                    "low": {"e_g": 0, "t_2g": 4},
                    "default": "high",
                },  # strong high, weak low
                5: {
                    "high": {"e_g": 2, "t_2g": 3},
                    "low": {"e_g": 0, "t_2g": 5},
                    "default": "low",
                },  # no high, weak low
                6: {
                    "high": {"e_g": 2, "t_2g": 4},
                    "low": {"e_g": 0, "t_2g": 6},
                    "default": "high",
                },  # weak high, no low
                7: {
                    "high": {"e_g": 2, "t_2g": 5},
                    "low": {"e_g": 1, "t_2g": 6},
                    "default": "low",
                },  # weak high, strong low
                8: {"high": {"e_g": 2, "t_2g": 6}, "default": "high"},  # no
                9: {"high": {"e_g": 3, "t_2g": 6}, "default": "high"},  # strong
                10: {"high": {"e_g": 4, "t_2g": 6}, "default": "high"},
            },
            "tet": {  # no low spin observed experimentally in tetrahedral, all weak J-T
                0: {"high": {"e": 0, "t_2": 0}, "default": "high"},
                1: {"high": {"e": 1, "t_2": 0}, "default": "high"},
                2: {"high": {"e": 2, "t_2": 0}, "default": "high"},
                3: {"high": {"e": 2, "t_2": 1}, "default": "high"},
                4: {"high": {"e": 2, "t_2": 2}, "default": "high"},
                5: {"high": {"e": 2, "t_2": 3}, "default": "high"},
                6: {"high": {"e": 3, "t_2": 3}, "default": "high"},
                7: {"high": {"e": 4, "t_2": 3}, "default": "high"},
                8: {"high": {"e": 4, "t_2": 4}, "default": "high"},
                9: {"high": {"e": 4, "t_2": 5}, "default": "high"},
                10: {"high": {"e": 4, "t_2": 6}, "default": "high"},
            },
        }

    def get_analysis_and_structure(
        self,
        structure: Structure,
        calculate_valences: bool = True,
        guesstimate_spin: bool = False,
        op_threshold: float = 0.1,
    ) -> Tuple[Dict, Structure]:
        """Obtain an analysis of a given structure and if it may be Jahn-Teller
        active or not. This is a heuristic, and may give false positives and
        false negatives (false positives are preferred).
        Args:
            structure: input structure
            calculate_valences: whether to attempt to calculate valences or not, structure
                should have oxidation states to perform analysis (Default value = True)
            guesstimate_spin: whether to guesstimate spin state from magnetic moments
                or not, use with caution (Default value = False)
            op_threshold: threshold for order parameter above which to consider site
                to match an octahedral or tetrahedral motif, since Jahn-Teller structures
                can often be quite distorted, this threshold is smaller than one
                might expect
        Returns:
            analysis of structure, with key 'strength' which may be 'none', 'strong',
            'weak', or 'unknown' (Default value = 0.1) and decorated structure
        """

        # Fix: hoisted out of the per-site loop — it was re-defined on every
        # iteration of the Jahn-Teller-active branch below.
        def trim(f):
            """
            Avoid storing to unreasonable precision, hurts readability.
            """
            return float(f"{f:.4f}")

        structure = structure.get_primitive_structure()
        if calculate_valences:
            bva = BVAnalyzer()
            structure = bva.get_oxi_state_decorated_structure(structure)
        # no point testing multiple equivalent sites, doesn't make any difference to analysis
        # but makes returned
        symmetrized_structure = SpacegroupAnalyzer(structure).get_symmetrized_structure()
        # to detect structural motifs of a given site
        op = LocalStructOrderParams(["oct", "tet"])
        # dict of site index to the Jahn-Teller analysis of that site
        jt_sites = []
        non_jt_sites = []
        for indices in symmetrized_structure.equivalent_indices:
            idx = indices[0]
            site = symmetrized_structure[idx]
            # only interested in sites with oxidation states
            if isinstance(site.specie, Species) and site.specie.element.is_transition_metal:
                # get motif around site
                order_params = op.get_order_parameters(symmetrized_structure, idx)
                if order_params[0] > order_params[1] and order_params[0] > op_threshold:
                    motif = "oct"
                    motif_order_parameter = order_params[0]
                elif order_params[1] > op_threshold:
                    motif = "tet"
                    motif_order_parameter = order_params[1]
                else:
                    motif = "unknown"
                    motif_order_parameter = None
                if motif in ("oct", "tet"):
                    motif = cast(Literal["oct", "tet"], motif)  # mypy needs help
                    # guess spin of metal ion
                    if guesstimate_spin and "magmom" in site.properties:
                        # estimate if high spin or low spin
                        magmom = site.properties["magmom"]
                        spin_state = self._estimate_spin_state(site.specie, motif, magmom)
                    else:
                        spin_state = "unknown"
                    magnitude = self.get_magnitude_of_effect_from_species(site.specie, spin_state, motif)
                    if magnitude != "none":
                        ligands = get_neighbors_of_site_with_index(structure, idx, approach="min_dist", delta=0.15)
                        ligand_bond_lengths = [ligand.distance(structure[idx]) for ligand in ligands]
                        ligands_species = list({str(ligand.specie) for ligand in ligands})
                        ligand_bond_length_spread = max(ligand_bond_lengths) - min(ligand_bond_lengths)
                        # to be Jahn-Teller active, all ligands have to be the same
                        if len(ligands_species) == 1:
                            jt_sites.append(
                                {
                                    "strength": magnitude,
                                    "motif": motif,
                                    "motif_order_parameter": trim(motif_order_parameter),
                                    "spin_state": spin_state,
                                    "species": str(site.specie),
                                    "ligand": ligands_species[0],
                                    "ligand_bond_lengths": [trim(length) for length in ligand_bond_lengths],
                                    "ligand_bond_length_spread": trim(ligand_bond_length_spread),
                                    "site_indices": indices,
                                }
                            )
                    # store reasons for not being J-T active
                    else:
                        non_jt_sites.append(
                            {
                                "site_indices": indices,
                                "strength": "none",
                                "reason": "Not Jahn-Teller active for this electronic configuration.",
                            }
                        )
                else:
                    non_jt_sites.append(
                        {
                            "site_indices": indices,
                            "strength": "none",
                            "reason": f"motif is {motif}",
                        }
                    )
        # perform aggregation of all sites
        if jt_sites:
            analysis = {"active": True}  # type: Dict[str, Any]
            # if any site could exhibit 'strong' Jahn-Teller effect
            # then mark whole structure as strong
            strong_magnitudes = [site["strength"] == "strong" for site in jt_sites]
            if any(strong_magnitudes):
                analysis["strength"] = "strong"
            else:
                analysis["strength"] = "weak"
            analysis["sites"] = jt_sites
            return analysis, structure
        return {"active": False, "sites": non_jt_sites}, structure

    def get_analysis(
        self,
        structure: Structure,
        calculate_valences: bool = True,
        guesstimate_spin: bool = False,
        op_threshold: float = 0.1,
    ) -> Dict:
        """
        Convenience method, uses get_analysis_and_structure method.
        Obtain an analysis of a given structure and if it may be Jahn-Teller
        active or not. This is a heuristic, and may give false positives and
        false negatives (false positives are preferred).
        Args:
            structure: input structure
            calculate_valences: whether to attempt to calculate valences or not, structure
                should have oxidation states to perform analysis (Default value = True)
            guesstimate_spin: whether to guesstimate spin state from magnetic moments
                or not, use with caution (Default value = False)
            op_threshold: threshold for order parameter above which to consider site
                to match an octahedral or tetrahedral motif (Default value = 0.1)
        Returns:
            analysis of structure, with key 'strength' which may be 'none', 'strong',
            'weak', or 'unknown'
        """
        return self.get_analysis_and_structure(
            structure,
            calculate_valences=calculate_valences,
            guesstimate_spin=guesstimate_spin,
            op_threshold=op_threshold,
        )[0]

    def is_jahn_teller_active(
        self,
        structure: Structure,
        calculate_valences: bool = True,
        guesstimate_spin: bool = False,
        op_threshold: float = 0.1,
    ) -> bool:
        """
        Convenience method, uses get_analysis_and_structure method.
        Check if a given structure and if it may be Jahn-Teller
        active or not. This is a heuristic, and may give false positives and
        false negatives (false positives are preferred).
        Args:
            structure: input structure
            calculate_valences: whether to attempt to calculate valences or not, structure
                should have oxidation states to perform analysis (Default value = True)
            guesstimate_spin: whether to guesstimate spin state from magnetic moments
                or not, use with caution (Default value = False)
            op_threshold: threshold for order parameter above which to consider site
                to match an octahedral or tetrahedral motif (Default value = 0.1)
        Returns:
            boolean, True if might be Jahn-Teller active, False if not
        """
        active = False
        try:
            analysis = self.get_analysis(
                structure,
                calculate_valences=calculate_valences,
                guesstimate_spin=guesstimate_spin,
                op_threshold=op_threshold,
            )
            active = analysis["active"]
        except Exception as e:
            warnings.warn(f"Error analyzing {structure.composition.reduced_formula}: {e}")
        return active

    def tag_structure(
        self,
        structure: Structure,
        calculate_valences: bool = True,
        guesstimate_spin: bool = False,
        op_threshold: float = 0.1,
    ) -> Structure:
        """
        Convenience method, uses get_analysis_and_structure method.
        Add a "possible_jt_active" site property on Structure.
        Args:
            structure: input structure
            calculate_valences: whether to attempt to calculate valences or not, structure
                should have oxidation states to perform analysis (Default value = True)
            guesstimate_spin: whether to guesstimate spin state from magnetic moments
                or not, use with caution (Default value = False)
            op_threshold: threshold for order parameter above which to consider site
                to match an octahedral or tetrahedral motif (Default value = 0.1)
        Returns:
            Decorated Structure, will be in primitive setting.
        """
        try:
            analysis, structure = self.get_analysis_and_structure(
                structure,
                calculate_valences=calculate_valences,
                guesstimate_spin=guesstimate_spin,
                op_threshold=op_threshold,
            )
            jt_sites = [False] * len(structure)
            if analysis["active"]:
                for site in analysis["sites"]:
                    for index in site["site_indices"]:
                        jt_sites[index] = True
                    structure.add_site_property("possible_jt_active", jt_sites)
            return structure
        except Exception as e:
            warnings.warn(f"Error analyzing {structure.composition.reduced_formula}: {e}")
            return structure

    @staticmethod
    def _get_number_of_d_electrons(species: Species) -> int:
        """
        Get number of d electrons of a species.
        Args:
            species: Species object
        Returns: Number of d electrons.
        """
        # TODO: replace with more generic Hund's rule algorithm?
        # taken from get_crystal_field_spin
        elec = species.full_electronic_structure
        if len(elec) < 4 or elec[-1][1] != "s" or elec[-2][1] != "d":
            raise AttributeError(f"Invalid element {species.symbol} for crystal field calculation.")
        # Fix: annotated -> int (the computed value is an int, and callers use
        # it as a dict key into integer-keyed spin_configs).
        nelectrons = int(elec[-1][2] + elec[-2][2] - species.oxi_state)
        if nelectrons < 0 or nelectrons > 10:
            raise AttributeError(f"Invalid oxidation state {species.oxi_state} for element {species.symbol}")
        return nelectrons

    def get_magnitude_of_effect_from_species(self, species: Union[str, Species], spin_state: str, motif: str) -> str:
        """
        Get magnitude of Jahn-Teller effect from provided species, spin state and motif.
        Args:
            species: e.g. Fe2+
            spin_state: "high" or "low"
            motif: "oct" or "tet"
        Returns: "none", "weak" or "strong"
        """
        magnitude = "none"
        sp = get_el_sp(species)
        # has to be Species; we need to know the oxidation state
        if isinstance(sp, Species) and sp.element.is_transition_metal:
            d_electrons = self._get_number_of_d_electrons(sp)
            if motif in self.spin_configs:
                # Fall back to the tabulated default spin state when the
                # requested one is unknown for this configuration.
                if spin_state not in self.spin_configs[motif][d_electrons]:
                    spin_state = self.spin_configs[motif][d_electrons]["default"]
                spin_config = self.spin_configs[motif][d_electrons][spin_state]
                magnitude = JahnTellerAnalyzer.get_magnitude_of_effect_from_spin_config(motif, spin_config)
        else:
            warnings.warn("No data for this species.")
        return magnitude

    @staticmethod
    def get_magnitude_of_effect_from_spin_config(motif: str, spin_config: Dict[str, float]) -> str:
        """
        Roughly, the magnitude of Jahn-Teller distortion will be:
        * in octahedral environments, strong if e_g orbitals
          unevenly occupied but weak if t_2g orbitals unevenly
          occupied
        * in tetrahedral environments always weaker
        Args:
            motif: "oct" or "tet"
            spin_config: dict of 'e' (e_g) and 't' (t2_g)
                with number of electrons in each state
        Returns: "none", "weak" or "strong"
        """
        magnitude = "none"
        if motif == "oct":
            e_g = spin_config["e_g"]
            t_2g = spin_config["t_2g"]
            if (e_g % 2 != 0) or (t_2g % 3 != 0):
                magnitude = "weak"
                if e_g % 2 == 1:
                    magnitude = "strong"
        elif motif == "tet":
            e = spin_config["e"]
            t_2 = spin_config["t_2"]
            if (e % 3 != 0) or (t_2 % 2 != 0):
                magnitude = "weak"
        return magnitude

    @staticmethod
    def _estimate_spin_state(
        species: Union[str, Species], motif: Literal["oct", "tet"], known_magmom: float
    ) -> Literal["undefined", "low", "high", "unknown"]:
        """Simple heuristic to estimate spin state. If magnetic moment
        is sufficiently close to that predicted for a given spin state,
        we assign it that state. If we only have data for one spin
        state then that's the one we use (e.g. we assume all tetrahedral
        complexes are high-spin, since this is typically the case).
        Args:
            species: str or Species
            motif ("oct" | "tet"): Tetrahedron or octahedron crystal site coordination
            known_magmom: magnetic moment in Bohr magnetons
        Returns:
            "undefined" (if only one spin state possible), "low", "high" or "unknown"
        """
        mu_so_high = JahnTellerAnalyzer.mu_so(species, motif=motif, spin_state="high")
        mu_so_low = JahnTellerAnalyzer.mu_so(species, motif=motif, spin_state="low")
        if mu_so_high == mu_so_low:
            return "undefined"  # undefined or only one spin state possible
        if mu_so_high is None:
            return "low"
        if mu_so_low is None:
            return "high"
        diff = mu_so_high - mu_so_low
        # WARNING! this heuristic has not been robustly tested or benchmarked
        # using 'diff*0.25' as arbitrary measure, if known magmom is
        # too far away from expected value, we don't try to classify it
        if known_magmom > mu_so_high or abs(mu_so_high - known_magmom) < diff * 0.25:
            return "high"
        if known_magmom < mu_so_low or abs(mu_so_low - known_magmom) < diff * 0.25:
            return "low"
        return "unknown"

    @staticmethod
    def mu_so(
        species: Union[str, Species], motif: Literal["oct", "tet"], spin_state: Literal["high", "low"]
    ) -> Optional[float]:
        """Calculates the spin-only magnetic moment for a
        given species. Only supports transition metals.
        Args:
            species: Species
            motif ("oct" | "tet"): Tetrahedron or octahedron crystal site coordination
            spin_state ("low" | "high"): Whether the species is in a high or low spin state
        Returns:
            float: Spin-only magnetic moment in Bohr magnetons or None if
            species crystal field not defined
        """
        try:
            sp = get_el_sp(species)
            n = sp.get_crystal_field_spin(coordination=motif, spin_config=spin_state)
            # calculation spin-only magnetic moment for this number of unpaired spins
            return np.sqrt(n * (n + 2))
        except AttributeError:
            return None
|
|
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import json
import logging
import itertools
from botocore.exceptions import ClientError
from concurrent.futures import as_completed
from c7n.actions import ActionRegistry, BaseAction, ModifyVpcSecurityGroupsAction
from c7n.exceptions import PolicyValidationError
from c7n.filters import (
FilterRegistry, ValueFilter, DefaultVpcBase, AgeFilter, OPERATORS,
CrossAccountAccessFilter)
import c7n.filters.vpc as net_filters
from c7n.manager import resources
from c7n.resolver import ValuesFrom
from c7n.query import QueryResourceManager
from c7n import tags
from c7n.utils import (
type_schema, local_session, chunks, generate_arn, get_retry,
snapshot_identifier)
log = logging.getLogger('custodian.redshift')
# Module-level registries: the filter/action classes defined below register
# themselves here, and the Redshift resource manager points at them.
filters = FilterRegistry('redshift.filters')
actions = ActionRegistry('redshift.actions')
# Allow policies to match resources previously tagged for a deferred action.
filters.register('marked-for-op', tags.TagActionFilter)
@resources.register('redshift')
class Redshift(QueryResourceManager):
    """Resource manager for AWS Redshift clusters."""
    class resource_type(object):
        # How c7n enumerates and identifies redshift clusters via the AWS API.
        service = 'redshift'
        type = 'cluster'
        enum_spec = ('describe_clusters', 'Clusters', None)
        detail_spec = None
        name = id = 'ClusterIdentifier'
        filter_name = 'ClusterIdentifier'
        filter_type = 'scalar'
        date = 'ClusterCreateTime'
        dimension = 'ClusterIdentifier'
        config_type = "AWS::Redshift::Cluster"
    filter_registry = filters
    action_registry = actions
    # Retry AWS throttling errors transparently.
    retry = staticmethod(get_retry(('Throttling',)))
    permissions = ('iam:ListRoles',) # account id retrieval
    _generate_arn = None
    @property
    def generate_arn(self):
        # Lazily build (and cache) a partial that renders cluster ARNs for
        # this account/region.
        if self._generate_arn is None:
            self._generate_arn = functools.partial(
                generate_arn, 'redshift', region=self.config.region,
                account_id=self.account_id, resource_type='cluster',
                separator=':')
        return self._generate_arn
@filters.register('default-vpc')
class DefaultVpc(DefaultVpcBase):
    """Matches if a redshift cluster is in the account's default VPC.

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-default-vpc
            resource: redshift
            filters:
              - default-vpc
    """
    schema = type_schema('default-vpc')
    def __call__(self, redshift):
        # Clusters outside any VPC can never be in the default VPC.
        vpc_id = redshift.get('VpcId')
        if not vpc_id:
            return False
        return self.match(vpc_id) or False
@filters.register('security-group')
class SecurityGroupFilter(net_filters.SecurityGroupFilter):
    """Filter redshift clusters by attributes of their VPC security groups."""
    # JMESPath locating the security group ids on the cluster resource.
    RelatedIdsExpression = "VpcSecurityGroups[].VpcSecurityGroupId"
@filters.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
    """Filter redshift clusters by the subnets of their cluster subnet group."""
    # Subnet ids are resolved through the subnet-group resource rather
    # than an expression on the cluster itself.
    RelatedIdsExpression = ""
    def get_permissions(self):
        # The subnet-group manager knows which describe call it needs.
        return RedshiftSubnetGroup(self.manager.ctx, {}).get_permissions()
    def get_related_ids(self, resources):
        related = set()
        for cluster in resources:
            subnets = self.groups[cluster['ClusterSubnetGroupName']]['Subnets']
            related.update(s['SubnetIdentifier'] for s in subnets)
        return related
    def process(self, resources, event=None):
        # Index subnet groups by name once, then defer to the base filter.
        self.groups = {
            g['ClusterSubnetGroupName']: g
            for g in RedshiftSubnetGroup(self.manager.ctx, {}).resources()}
        return super(SubnetFilter, self).process(resources, event)
# Generic network-location filter shared across resource types.
filters.register('network-location', net_filters.NetworkLocation)
@filters.register('param')
class Parameter(ValueFilter):
    """Filter redshift clusters based on parameter values.

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-no-ssl
            resource: redshift
            filters:
              - type: param
                key: require_ssl
                value: false
                op: eq
    """
    schema = type_schema('param', rinherit=ValueFilter.schema)
    # Populated in process(); maps parameter group name -> {param: value}.
    group_params = ()
    permissions = ("redshift:DescribeClusterParameters",)
    def process(self, clusters, event=None):
        # Map each parameter group name to the clusters that use it so we
        # describe every group only once.
        groups = {}
        for r in clusters:
            for pg in r['ClusterParameterGroups']:
                groups.setdefault(pg['ParameterGroupName'], []).append(
                    r['ClusterIdentifier'])
        def get_params(group_name):
            # Fetch all parameters of one group; runs on a worker thread.
            c = local_session(self.manager.session_factory).client('redshift')
            paginator = c.get_paginator('describe_cluster_parameters')
            param_group = list(itertools.chain(*[p['Parameters']
                for p in paginator.paginate(ParameterGroupName=group_name)]))
            params = {}
            for p in param_group:
                v = p['ParameterValue']
                # Coerce non-default integer/boolean values from their
                # string form so ValueFilter comparisons work.
                if v != 'default' and p['DataType'] in ('integer', 'boolean'):
                    # overkill..
                    v = json.loads(v)
                params[p['ParameterName']] = v
            return params
        with self.executor_factory(max_workers=3) as w:
            group_names = groups.keys()
            self.group_params = dict(
                zip(group_names, w.map(get_params, group_names)))
        return super(Parameter, self).process(clusters, event)
    def __call__(self, db):
        # Merge the parameters of every group attached to the cluster,
        # then apply the configured value match.
        params = {}
        for pg in db['ClusterParameterGroups']:
            params.update(self.group_params[pg['ParameterGroupName']])
        return self.match(params)
@actions.register('delete')
class Delete(BaseAction):
    """Action to delete a redshift cluster.

    To prevent unwanted deletion of redshift clusters, it is recommended to
    apply a filter to the rule.

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-no-ssl
            resource: redshift
            filters:
              - type: param
                key: require_ssl
                value: false
                op: eq
            actions:
              - type: delete
    """
    schema = type_schema(
        'delete', **{'skip-snapshot': {'type': 'boolean'}})
    permissions = ('redshift:DeleteCluster',)
    def process(self, clusters):
        # Delete in batches of 5 across two workers; a failed batch is
        # logged without aborting the others.
        with self.executor_factory(max_workers=2) as w:
            futures = []
            for db_set in chunks(clusters, size=5):
                futures.append(
                    w.submit(self.process_db_set, db_set))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception deleting redshift set \n %s",
                        f.exception())
    def process_db_set(self, db_set):
        # Unless 'skip-snapshot' is set, request a final snapshot (name
        # built by snapshot_identifier) before deletion.
        skip = self.data.get('skip-snapshot', False)
        c = local_session(self.manager.session_factory).client('redshift')
        for db in db_set:
            params = {'ClusterIdentifier': db['ClusterIdentifier']}
            if skip:
                params['SkipFinalClusterSnapshot'] = True
            else:
                params['FinalClusterSnapshotIdentifier'] = snapshot_identifier(
                    'Final', db['ClusterIdentifier'])
            try:
                c.delete_cluster(**params)
            except ClientError as e:
                # Clusters mid-transition cannot be deleted; warn and
                # continue with the rest of the batch.
                if e.response['Error']['Code'] == "InvalidClusterState":
                    self.log.warning(
                        "Cannot delete cluster when not 'Available' state: %s",
                        db['ClusterIdentifier'])
                    continue
                raise
@actions.register('retention')
class RetentionWindow(BaseAction):
    """Action to set the snapshot retention period (in days).

    The window is only ever extended: clusters whose current retention
    period already meets or exceeds ``days`` are left untouched.

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-snapshot-retention
            resource: redshift
            filters:
              - type: value
                key: AutomatedSnapshotRetentionPeriod
                value: 21
                op: ne
            actions:
              - type: retention
                days: 21
    """
    date_attribute = 'AutomatedSnapshotRetentionPeriod'
    schema = type_schema(
        'retention',
        **{'days': {'type': 'number'}})
    permissions = ('redshift:ModifyCluster',)
    def process(self, clusters):
        with self.executor_factory(max_workers=2) as w:
            futures = []
            for cluster in clusters:
                futures.append(w.submit(
                    self.process_snapshot_retention,
                    cluster))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception setting Redshift retention \n %s",
                        f.exception())
    def process_snapshot_retention(self, cluster):
        """Extend the cluster's retention period when below the target."""
        current_retention = int(cluster.get(self.date_attribute, 0))
        new_retention = self.data['days']
        if current_retention < new_retention:
            # Simplified from max(current_retention, new_retention):
            # under the guard current < new, the max is always
            # new_retention.
            self.set_retention_window(cluster, new_retention)
        return cluster
    def set_retention_window(self, cluster, retention):
        c = local_session(self.manager.session_factory).client('redshift')
        c.modify_cluster(
            ClusterIdentifier=cluster['ClusterIdentifier'],
            AutomatedSnapshotRetentionPeriod=retention)
@actions.register('snapshot')
class Snapshot(BaseAction):
    """Action to take a snapshot of a redshift cluster.

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-snapshot
            resource: redshift
            filters:
              - type: value
                key: ClusterStatus
                value: available
                op: eq
            actions:
              - snapshot
    """
    schema = type_schema('snapshot')
    permissions = ('redshift:CreateClusterSnapshot',)
    def process(self, clusters):
        with self.executor_factory(max_workers=3) as w:
            futures = []
            for cluster in clusters:
                futures.append(w.submit(
                    self.process_cluster_snapshot,
                    cluster))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception creating Redshift snapshot  \n %s",
                        f.exception())
        return clusters
    def process_cluster_snapshot(self, cluster):
        c = local_session(self.manager.session_factory).client('redshift')
        # BUGFIX: untagged clusters have no 'Tags' key; cluster.get('Tags')
        # then yields None and Tags=None fails botocore parameter
        # validation. Default to an empty tag list instead.
        cluster_tags = cluster.get('Tags', [])
        c.create_cluster_snapshot(
            SnapshotIdentifier=snapshot_identifier(
                'Backup',
                cluster['ClusterIdentifier']),
            ClusterIdentifier=cluster['ClusterIdentifier'], Tags=cluster_tags)
@actions.register('enable-vpc-routing')
class EnhancedVpcRoutine(BaseAction):
    """Enable (or disable) enhanced VPC routing on redshift clusters.

    More: https://goo.gl/espcOF

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-enable-enhanced-routing
            resource: redshift
            filters:
              - type: value
                key: EnhancedVpcRouting
                value: false
                op: eq
            actions:
              - type: enable-vpc-routing
                value: true
    """
    schema = type_schema(
        'enable-vpc-routing',
        value={'type': 'boolean'})
    permissions = ('redshift:ModifyCluster',)
    def process(self, clusters):
        with self.executor_factory(max_workers=3) as w:
            futures = [
                w.submit(self.process_vpc_routing, c) for c in clusters]
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception changing Redshift VPC routing \n %s",
                        f.exception())
        return clusters
    def process_vpc_routing(self, cluster):
        desired = self.data.get('value', True)
        if bool(cluster.get('EnhancedVpcRouting', False)) == desired:
            # Already in the requested state; nothing to do.
            return
        client = local_session(self.manager.session_factory).client('redshift')
        client.modify_cluster(
            ClusterIdentifier=cluster['ClusterIdentifier'],
            EnhancedVpcRouting=desired)
@actions.register('set-public-access')
class RedshiftSetPublicAccess(BaseAction):
    """Set the 'PubliclyAccessible' flag on redshift clusters.

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-set-public-access
            resource: redshift
            filters:
              - PubliclyAccessible: true
            actions:
              - type: set-public-access
                state: false
    """
    schema = type_schema(
        'set-public-access',
        state={'type': 'boolean'})
    permissions = ('redshift:ModifyCluster',)
    def set_access(self, c):
        # Defaults to revoking public access when 'state' is omitted.
        client = local_session(self.manager.session_factory).client('redshift')
        client.modify_cluster(
            ClusterIdentifier=c['ClusterIdentifier'],
            PubliclyAccessible=self.data.get('state', False))
    def process(self, clusters):
        with self.executor_factory(max_workers=2) as w:
            futures = {}
            for cluster in clusters:
                futures[w.submit(self.set_access, cluster)] = cluster
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception setting Redshift public access on %s \n %s",
                        futures[f]['ClusterIdentifier'], f.exception())
        return clusters
@actions.register('mark-for-op')
class TagDelayedAction(tags.TagDelayedAction):
    """Tag a redshift cluster so an action runs on it at a later date.

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-terminate-unencrypted
            resource: redshift
            filters:
              - "tag:custodian_cleanup": absent
              - type: value
                key: Encrypted
                value: false
                op: eq
            actions:
              - type: mark-for-op
                tag: custodian_cleanup
                op: delete
                days: 5
                msg: "Unencrypted Redshift cluster: {op}@{action_date}"
    """
    schema = type_schema('mark-for-op', rinherit=tags.TagDelayedAction.schema)
    # BUGFIX: was 'redshift.CreateTags' (dot separator). IAM actions use
    # the 'service:Action' form, matching every other action in this
    # module.
    permissions = ('redshift:CreateTags',)
    def process_resource_set(self, resources, tags):
        client = local_session(self.manager.session_factory).client('redshift')
        for r in resources:
            arn = self.manager.generate_arn(r['ClusterIdentifier'])
            client.create_tags(ResourceName=arn, Tags=tags)
@actions.register('tag')
class Tag(tags.Tag):
    """Add one or more tags to a redshift cluster.

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-tag
            resource: redshift
            filters:
              - "tag:RedshiftTag": absent
            actions:
              - type: tag
                key: RedshiftTag
                value: "Redshift Tag Value"
    """
    concurrency = 2
    batch_size = 5
    permissions = ('redshift:CreateTags',)
    def process_resource_set(self, resources, tags):
        client = local_session(self.manager.session_factory).client('redshift')
        arns = (self.manager.generate_arn(r['ClusterIdentifier'])
                for r in resources)
        for arn in arns:
            client.create_tags(ResourceName=arn, Tags=tags)
@actions.register('unmark')
@actions.register('remove-tag')
class RemoveTag(tags.RemoveTag):
    """Remove one or more tags from a redshift cluster.

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-remove-tag
            resource: redshift
            filters:
              - "tag:RedshiftTag": present
            actions:
              - type: remove-tag
                tags: ["RedshiftTags"]
    """
    concurrency = 2
    batch_size = 5
    permissions = ('redshift:DeleteTags',)
    def process_resource_set(self, resources, tag_keys):
        client = local_session(self.manager.session_factory).client('redshift')
        arns = (self.manager.generate_arn(r['ClusterIdentifier'])
                for r in resources)
        for arn in arns:
            client.delete_tags(ResourceName=arn, TagKeys=tag_keys)
@actions.register('tag-trim')
class TagTrim(tags.TagTrim):
    """Remove tags from a redshift cluster.

    This can be used to prevent reaching the ceiling limit of tags on a
    resource.

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-tag-trim
            resource: redshift
            filters:
              - type: tag-count
                count: 10
            actions:
              - type: tag-trim
                space: 1
                preserve:
                  - RequiredTag1
                  - RequiredTag2
    """
    max_tag_count = 10
    permissions = ('redshift:DeleteTags',)
    def process_tag_removal(self, resource, candidates):
        client = local_session(self.manager.session_factory).client('redshift')
        # BUGFIX: was resource['DBInstanceIdentifier'] -- an RDS key
        # copy-pasted into this redshift action; redshift clusters carry
        # 'ClusterIdentifier', so the old code raised KeyError.
        arn = self.manager.generate_arn(resource['ClusterIdentifier'])
        client.delete_tags(ResourceName=arn, TagKeys=candidates)
@resources.register('redshift-subnet-group')
class RedshiftSubnetGroup(QueryResourceManager):
    """Redshift subnet group."""
    class resource_type(object):
        # Enumeration/identity metadata for cluster subnet groups.
        service = 'redshift'
        type = 'redshift-subnet-group'
        id = name = 'ClusterSubnetGroupName'
        enum_spec = (
            'describe_cluster_subnet_groups', 'ClusterSubnetGroups', None)
        filter_name = 'ClusterSubnetGroupName'
        filter_type = 'scalar'
        dimension = None
        date = None
        config_type = "AWS::Redshift::ClusterSubnetGroup"
@resources.register('redshift-snapshot')
class RedshiftSnapshot(QueryResourceManager):
    """Resource manager for Redshift snapshots.
    """
    # Snapshots have their own filter/action registries, separate from the
    # cluster-level 'filters'/'actions' registries above.
    filter_registry = FilterRegistry('redshift-snapshot.filters')
    action_registry = ActionRegistry('redshift-snapshot.actions')
    filter_registry.register('marked-for-op', tags.TagActionFilter)
    _generate_arn = None
    @property
    def generate_arn(self):
        # Lazily build and cache a partial that renders snapshot ARNs
        # (resource_type 'snapshot', ':' separator).
        if self._generate_arn is None:
            self._generate_arn = functools.partial(
                generate_arn, 'redshift', region=self.config.region,
                account_id=self.account_id, resource_type='snapshot',
                separator=':')
        return self._generate_arn
    class resource_type(object):
        # Enumeration/identity metadata for cluster snapshots.
        service = 'redshift'
        type = 'redshift-snapshot'
        enum_spec = ('describe_cluster_snapshots', 'Snapshots', None)
        name = id = 'SnapshotIdentifier'
        filter_name = None
        filter_type = None
        dimension = None
        date = 'SnapshotCreateTime'
        config_type = "AWS::Redshift::ClusterSnapshot"
@actions.register('modify-security-groups')
class RedshiftModifyVpcSecurityGroups(ModifyVpcSecurityGroupsAction):
    """Modify security groups on a Redshift cluster"""
    permissions = ('redshift:ModifyCluster',)
    def process(self, clusters):
        client = local_session(self.manager.session_factory).client('redshift')
        # get_groups (inherited) presumably returns one target group list
        # per cluster, aligned by index -- confirm against the base class.
        groups = super(RedshiftModifyVpcSecurityGroups, self).get_groups(
            clusters, metadata_key='VpcSecurityGroupId')
        for idx, c in enumerate(clusters):
            client.modify_cluster(
                ClusterIdentifier=c['ClusterIdentifier'],
                VpcSecurityGroupIds=groups[idx])
@RedshiftSnapshot.filter_registry.register('age')
class RedshiftSnapshotAge(AgeFilter):
    """Filters redshift snapshots based on age (in days).

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-old-snapshots
            resource: redshift-snapshot
            filters:
              - type: age
                days: 21
                op: gt
    """
    schema = type_schema(
        'age', days={'type': 'number'},
        op={'type': 'string', 'enum': list(OPERATORS.keys())})
    # Snapshot attribute the age comparison is computed from.
    date_attribute = 'SnapshotCreateTime'
@RedshiftSnapshot.filter_registry.register('cross-account')
class RedshiftSnapshotCrossAccount(CrossAccountAccessFilter):
    """Filter snapshots shared with accounts outside the whitelist."""
    permissions = ('redshift:DescribeClusterSnapshots',)
    schema = type_schema(
        'cross-account',
        whitelist={'type': 'array', 'items': {'type': 'string'}},
        whitelist_from=ValuesFrom.schema)
    def process(self, snapshots, event=None):
        allowed = self.get_accounts()
        results = []
        for snap in snapshots:
            shared_with = snap.get('AccountsWithRestoreAccess')
            if not shared_with:
                # Never shared; cannot violate.
                continue
            outside = {a.get('AccountId')
                       for a in shared_with}.difference(allowed)
            if outside:
                # Record the offending accounts on the resource for
                # downstream actions/reporting.
                snap['c7n:CrossAccountViolations'] = list(outside)
                results.append(snap)
        return results
@RedshiftSnapshot.action_registry.register('delete')
class RedshiftSnapshotDelete(BaseAction):
    """Deletes redshift snapshots (typically combined with an age filter).

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-delete-old-snapshots
            resource: redshift-snapshot
            filters:
              - type: age
                days: 21
                op: gt
            actions:
              - delete
    """
    schema = type_schema('delete')
    permissions = ('redshift:DeleteClusterSnapshot',)
    def process(self, snapshots):
        log.info("Deleting %d Redshift snapshots", len(snapshots))
        with self.executor_factory(max_workers=3) as w:
            futures = []
            # NOTE(review): snapshots are chunked in reverse listing
            # order -- presumably deliberate ordering; confirm intent.
            for snapshot_set in chunks(reversed(snapshots), size=50):
                futures.append(
                    w.submit(self.process_snapshot_set, snapshot_set))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception deleting snapshot set \n %s",
                        f.exception())
        return snapshots
    def process_snapshot_set(self, snapshots_set):
        c = local_session(self.manager.session_factory).client('redshift')
        for s in snapshots_set:
            c.delete_cluster_snapshot(
                SnapshotIdentifier=s['SnapshotIdentifier'],
                SnapshotClusterIdentifier=s['ClusterIdentifier'])
@RedshiftSnapshot.action_registry.register('mark-for-op')
class RedshiftSnapshotTagDelayedAction(tags.TagDelayedAction):
    """Tag a redshift snapshot so an action runs on it at a later date.

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-snapshot-expiring
            resource: redshift-snapshot
            filters:
              - "tag:custodian_cleanup": absent
              - type: age
                days: 14
                op: eq
            actions:
              - type: mark-for-op
                tag: custodian_cleanup
                msg: "Snapshot expiring: {op}@{action_date}"
                op: delete
                days: 7
    """
    schema = type_schema('mark-for-op', rinherit=tags.TagDelayedAction.schema)
    permissions = ('redshift:CreateTags',)
    def process_resource_set(self, resources, tags):
        client = local_session(self.manager.session_factory).client('redshift')
        for r in resources:
            # Snapshot ARNs embed both the cluster and snapshot ids.
            arn = self.manager.generate_arn(
                '%s/%s' % (r['ClusterIdentifier'], r['SnapshotIdentifier']))
            client.create_tags(ResourceName=arn, Tags=tags)
@RedshiftSnapshot.action_registry.register('tag')
class RedshiftSnapshotTag(tags.Tag):
    """Add one or more tags to a redshift snapshot.

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-required-tags
            resource: redshift-snapshot
            filters:
              - "tag:RequiredTag1": absent
            actions:
              - type: tag
                key: RequiredTag1
                value: RequiredValue1
    """
    concurrency = 2
    batch_size = 5
    permissions = ('redshift:CreateTags',)
    def process_resource_set(self, resources, tags):
        client = local_session(self.manager.session_factory).client('redshift')
        for r in resources:
            # Snapshot ARNs embed both the cluster and snapshot ids.
            arn = self.manager.generate_arn(
                '%s/%s' % (r['ClusterIdentifier'], r['SnapshotIdentifier']))
            client.create_tags(ResourceName=arn, Tags=tags)
@RedshiftSnapshot.action_registry.register('unmark')
@RedshiftSnapshot.action_registry.register('remove-tag')
class RedshiftSnapshotRemoveTag(tags.RemoveTag):
    """Remove one or more tags from a redshift snapshot.

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-remove-tags
            resource: redshift-snapshot
            filters:
              - "tag:UnusedTag1": present
            actions:
              - type: remove-tag
                tags: ["UnusedTag1"]
    """
    concurrency = 2
    batch_size = 5
    permissions = ('redshift:DeleteTags',)
    def process_resource_set(self, resources, tag_keys):
        client = local_session(self.manager.session_factory).client('redshift')
        for r in resources:
            # Snapshot ARNs embed both the cluster and snapshot ids.
            arn = self.manager.generate_arn(
                '%s/%s' % (r['ClusterIdentifier'], r['SnapshotIdentifier']))
            client.delete_tags(ResourceName=arn, TagKeys=tag_keys)
@RedshiftSnapshot.action_registry.register('revoke-access')
class RedshiftSnapshotRevokeAccess(BaseAction):
    """Revokes ability of accounts to restore a snapshot.

    :example:

    .. code-block:: yaml

        policies:
          - name: redshift-snapshot-revoke-access
            resource: redshift-snapshot
            filters:
              - type: cross-account
                whitelist:
                  - 012345678910
            actions:
              - type: revoke-access
    """
    permissions = ('redshift:RevokeSnapshotAccess',)
    schema = type_schema('revoke-access')
    def validate(self):
        # Requires the cross-account filter, which annotates each snapshot
        # with 'c7n:CrossAccountViolations' consumed below.
        for f in self.manager.filters:
            if isinstance(f, RedshiftSnapshotCrossAccount):
                return self
        raise PolicyValidationError(
            '`revoke-access` may only be used in '
            'conjunction with `cross-account` filter on %s' % (self.manager.data,))
    def process_snapshot_set(self, client, snapshot_set):
        # Revoke each offending account individually, retrying throttles.
        for s in snapshot_set:
            for a in s.get('c7n:CrossAccountViolations', []):
                try:
                    self.manager.retry(
                        client.revoke_snapshot_access,
                        SnapshotIdentifier=s['SnapshotIdentifier'],
                        AccountWithRestoreAccess=a)
                except ClientError as e:
                    # Snapshot already gone; nothing left to revoke.
                    if e.response['Error']['Code'] == 'ClusterSnapshotNotFound':
                        continue
                    raise
    def process(self, snapshots):
        client = local_session(self.manager.session_factory).client('redshift')
        with self.executor_factory(max_workers=2) as w:
            futures = {}
            for snapshot_set in chunks(snapshots, 25):
                futures[w.submit(
                    self.process_snapshot_set, client, snapshot_set)
                ] = snapshot_set
            for f in as_completed(futures):
                if f.exception():
                    self.log.exception(
                        'Exception while revoking access on %s: %s' % (
                            ', '.join(
                                [s['SnapshotIdentifier'] for s in futures[f]]),
                            f.exception()))
|
|
#!/usr/bin/env python3
import argparse
from datetime import timedelta, datetime
from glob import glob
"""matplotlib gets imported when needed"""
from time import strptime, mktime
def OpenFile(InputFile):
    """Open *InputFile* for reading and return the file object.

    On IOError the error is reported on stdout and None is returned.
    """
    try:
        handle = open(InputFile)
    except IOError as e:
        print("Got IOError, '{0}: {1}'".format(e.errno, e.strerror))
        return None
    return handle
def GetTime(TimeType, PmSuspendFile):
    """Return the timestamps at which the machine suspended or resumed.

    TimeType selects which marker to look for ('suspend' or 'resume');
    the first DateFieldLength characters of each matching log line are
    collected.
    """
    markers = {'suspend': 'Running hooks for suspend',
               'resume': 'Running hooks for resume'}
    needle = markers.get(TimeType)
    DataFile = OpenFile(PmSuspendFile)
    stamps = list()
    for line in DataFile:
        if needle is not None and needle in line:
            stamps.append(line[:DateFieldLength])
    return(stamps)
def GetBatteryData(BatteryChargeFile):
    """Read battery charge history as written by upowerd
    (/var/lib/upower/history-charge*.dat) and return a list of
    [formatted-time, percentage, state] triples.
    """
    def _row(line):
        stamp, charge, state = line.split()
        formatted = datetime.fromtimestamp(int(stamp)).strftime(
            '%d.%m.%Y %H:%M:%S')
        return [formatted, charge, state]
    return [_row(line) for line in OpenFile(BatteryChargeFile)]
def PrintBatteryConsole(BatteryChargeFile):
    """Print out the gathered battery charging/discharging information"""
    ChargeInfo = GetBatteryData(BatteryChargeFile)
    # Split the [time, percentage, state] triples into parallel columns.
    Time = list()
    Battery = list()
    State = list()
    for elem in ChargeInfo:
        Time.append(elem[0])
        Battery.append(elem[1])
        State.append(elem[2])
    # Header row, then one tab-aligned row per sample.
    print('{0:5}\t{1:20}\t{2:10}\t{3:20}'.format('Index', 'Time',
                                                 'Percentage', 'State'))
    if len(Time) == len(Battery) and len(Time) == len(State):
        for index, (a, b, c) in enumerate(zip(Time, Battery, State)):
            print('{0:5}\t{1:20}\t{2:10}\t{3:20}'.format(index, a, b, c))
def GetDuration(PmSuspendFile):
    """Calculate the time the computer spent in suspend; returns Duration,
    SuspendTime and ResumeTime (the latter two reformatted to
    '%d.%m.%Y %H:%M:%S').
    """
    Duration = list()
    SuspendTime = GetTime('suspend', PmSuspendFile)
    ResumeTime = GetTime('resume', PmSuspendFile)
    # Pair each suspend with its resume by position; zip drops any
    # trailing unmatched entry.
    for index, (a, b) in enumerate(zip(SuspendTime, ResumeTime)):
        # Parse with the global DateFormat set from the command line.
        elema = mktime(strptime(a, DateFormat))
        elemb = mktime(strptime(b, DateFormat))
        elemc = timedelta(seconds=elemb - elema)
        Duration.append(elemc)
        """Reformat the dates so they match those we use for battery
        information display"""
        SuspendTime[index] = datetime.fromtimestamp(int(elema)).strftime(
            '%d.%m.%Y %H:%M:%S')
        ResumeTime[index] = datetime.fromtimestamp(int(elemb)).strftime(
            '%d.%m.%Y %H:%M:%S')
    return(Duration, SuspendTime, ResumeTime)
def PrintConsole(PmSuspendFile):
    """Print the gathered info out to console"""
    SuspendDuration, SuspendTime, ResumeTime = GetDuration(PmSuspendFile)
    if len(SuspendTime) == len(ResumeTime):
        # Header row, then one tab-aligned row per suspend/resume cycle.
        print('{0:5}\t{1:20}\t{2:20}\t{3:10}'.format('Index', 'Suspend time',
                                                     'Resume time', 'Duration'))
        for index, (suspend, resume, duration) in enumerate(
                zip(SuspendTime, ResumeTime, SuspendDuration)):
            print('{0:5}\t{1:20}\t{2:20}\t{3:10}'.format(index, suspend,
                                                         resume,
                                                         str(duration)))
    else:
        print("The length of SuspendTime and ResumeTime lists differs!")
def DrawGraph(PmSuspendFile):
    """Draw a suspend graph using matplotlib"""
    # Imported lazily so console-only usage works without matplotlib.
    from matplotlib import pyplot
    SuspendDuration, SuspendTime, ResumeTime = GetDuration(PmSuspendFile)
    NewDuration = list()
    NewSuspendTime = list()
    for (elema, elemb, elemc) in zip(SuspendDuration, SuspendTime, ResumeTime):
        """y is NewDuration, x is NewSuspendTime
        insert the values for y twice in a row in order to get the same data
        points for suspend and resume x point
        """
        NewDuration.append(datetime.strptime(str(elema), '%H:%M:%S'))
        NewDuration.append(datetime.strptime(str(elema), '%H:%M:%S'))
        NewSuspendTime.append(datetime.strptime(elemb, '%d.%m.%Y %H:%M:%S'))
        NewSuspendTime.append(datetime.strptime(elemc, '%d.%m.%Y %H:%M:%S'))
    pyplot.plot(NewSuspendTime, NewDuration)
    pyplot.xlabel('Date/time')
    pyplot.ylabel('Duration')
    pyplot.title('Suspend graph')
    # Blocks until the interactive window is closed.
    pyplot.show()
def DrawBatteryGraph(BatteryChargeFile):
    """Draw a battery graph using matplotlib"""
    # Imported lazily so console-only usage works without matplotlib.
    from matplotlib import pyplot
    ChargeInfo = GetBatteryData(BatteryChargeFile)
    Time = list()
    Battery = list()
    for elem in ChargeInfo:
        # Re-parse the preformatted timestamp into a datetime for the axis.
        Time.append(datetime.strptime(elem[0], '%d.%m.%Y %H:%M:%S'))
        Battery.append(elem[1])
    pyplot.plot(Time, Battery)
    pyplot.xlabel('Date/time')
    pyplot.ylabel('Percentage')
    pyplot.title('Battery graph')
    # Blocks until the interactive window is closed.
    pyplot.show()
def DrawAllGraphs(BatteryChargeFile, PmSuspendFile):
    """Draw both the battery charge and the suspend graph"""
    # Imported lazily so console-only usage works without matplotlib.
    from matplotlib import pyplot
    ChargeInfo = GetBatteryData(BatteryChargeFile)
    Time = list()
    Battery = list()
    # Set of calendar days with battery samples; used below to restrict
    # the suspend graph to days that also have battery data.
    TimeCompare = set()
    for elem in ChargeInfo:
        Time.append(datetime.strptime(elem[0], '%d.%m.%Y %H:%M:%S'))
        TimeCompare.add(datetime.strptime(elem[0][:10], '%d.%m.%Y'))
        Battery.append(elem[1])
    # Upper subplot: battery percentage over time.
    pyplot.subplot(2, 1, 1)
    pyplot.plot(Time, Battery, marker='x', linestyle='--', color='r')
    SuspendDuration, SuspendTime, ResumeTime = GetDuration(PmSuspendFile)
    NewDuration = list()
    NewSuspendTime = list()
    for (elema, elemb, elemc) in zip(SuspendDuration, SuspendTime,
                                     ResumeTime):
        vala = datetime.strptime(elemb[:10], '%d.%m.%Y')
        valb = datetime.strptime(elemc[:10], '%d.%m.%Y')
        if vala in TimeCompare:
            NewSuspendTime.append(datetime.strptime(elemb,
                                                    '%d.%m.%Y %H:%M:%S'))
            NewDuration.append(datetime.strptime(str(elema), '%H:%M:%S'))
        if valb in TimeCompare:
            NewSuspendTime.append(datetime.strptime(elemc,
                                                    '%d.%m.%Y %H:%M:%S'))
            NewDuration.append(datetime.strptime(str(elema), '%H:%M:%S'))
    # Lower subplot: suspend durations for the matching days.
    pyplot.subplot(2, 1, 2)
    pyplot.plot(NewSuspendTime, NewDuration)
    # Blocks until the interactive window is closed.
    pyplot.show()
def DoIt():
    """
    Set up the available program options.
    Call the proper functions with proper parameters depending on user
    input.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--all', dest='All', help='Draw both the '
                        'battery and suspend graph', action='store_true')
    parser.add_argument('-b', '--battery', dest='Battery', help='Show the '
                        'battery charging/discharging information',
                        action='store_true')
    parser.add_argument('-d', '--date', dest='DateFormat', help='Specify '
                        'the date format', default='%a %b %d %H:%M:%S %Z %Y',
                        type=str, action='store')
    parser.add_argument('-f', '--file', dest='PmSuspendFile', help='Specify '
                        'which file name to parse',
                        default='/var/log/pm-suspend.log', type=str,
                        action='store')
    parser.add_argument('-l', '--length', dest='DateFieldLength',
                        help='Specify the length of the date field in the '
                        'file', default=29, type=int, action='store')
    parser.add_argument('-o', '--output', dest='Output', help='Specify the '
                        'output destination', default='console', type=str,
                        action='store')
    args = parser.parse_args()
    # NOTE(review): both 'if not args...' branches below look effectively
    # dead since argparse supplies non-empty defaults for DateFormat and
    # DateFieldLength -- confirm before relying on them.
    if not args.DateFormat:
        parser.print_help()
    else:
        # GetTime/GetDuration read these as globals.
        global DateFormat
        DateFormat = args.DateFormat
    if not args.DateFieldLength:
        parser.print_help()
    else:
        global DateFieldLength
        DateFieldLength = args.DateFieldLength
    if args.All:
        # Only the first matching upower history file is used.
        BatteryInfoFile = glob('/var/lib/upower/history-charge-*.dat')
        if args.Output == 'graph':
            DrawAllGraphs(BatteryInfoFile[0], args.PmSuspendFile)
        elif args.Output == 'console':
            PrintConsole(args.PmSuspendFile)
            PrintBatteryConsole(BatteryInfoFile[0])
    else:
        if args.Battery:
            BatteryInfoFile = glob('/var/lib/upower/history-charge-*.dat')
            if args.Output == 'console':
                PrintBatteryConsole(BatteryInfoFile[0])
            elif args.Output == 'graph':
                DrawBatteryGraph(BatteryInfoFile[0])
        else:
            if args.Output == 'console':
                PrintConsole(args.PmSuspendFile)
            elif args.Output == 'graph':
                DrawGraph(args.PmSuspendFile)
# Script entry point.
if __name__ == "__main__":
    DoIt()
|
|
import re
from xml.sax.saxutils import escape, unescape
from tokenizer import HTMLTokenizer
from constants import tokenTypes
class HTMLSanitizerMixin(object):
    """Sanitization of XHTML+MathML+SVG and of inline style attributes.

    ``sanitize_token`` re-emits any element not in ``allowed_elements`` as
    escaped character data and strips attributes not in
    ``allowed_attributes``.  ``style`` attribute values are parsed and only
    declarations built from ``allowed_css_properties`` /
    ``allowed_css_keywords`` (plus ``allowed_svg_properties``) pass through.
    Attributes listed in ``attr_val_is_uri`` are dropped unless their URI
    scheme appears in ``allowed_protocols``.

    Example::

        sanitize_html('<script> do_nasty_stuff() </script>')
         => &lt;script&gt; do_nasty_stuff() &lt;/script&gt;
        sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
         => <a>Click here for $100</a>
    """

    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
        'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
        'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
        'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
        'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
        'figure', 'footer', 'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4',
        'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', 'keygen', 'kbd',
        'label', 'legend', 'li', 'm', 'map', 'menu', 'meter', 'multicol',
        'nav', 'nextid', 'ol', 'output', 'optgroup', 'option', 'p', 'pre',
        'progress', 'q', 's', 'samp', 'section', 'select', 'small', 'sound',
        'source', 'spacer', 'span', 'strike', 'strong', 'sub', 'sup', 'table',
        'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead', 'tr', 'tt',
        'u', 'ul', 'var', 'video']

    mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
        'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
        'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
        'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
        'munderover', 'none']

    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
        'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
        'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
        'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
        'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
        'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']

    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
        'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
        'background', 'balance', 'bgcolor', 'bgproperties', 'border',
        'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
        'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
        'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
        'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
        'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
        'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
        'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
        'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
        'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
        'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
        'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
        'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
        'optimum', 'pattern', 'ping', 'point-size', 'prompt', 'pqg',
        'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
        'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
        'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
        'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
        'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
        'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
        'width', 'wrap', 'xml:lang']

    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
        'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
        'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
        'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
        'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
        'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
        'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
        'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
        'xlink:type', 'xmlns', 'xmlns:xlink']

    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
        'arabic-form', 'ascent', 'attributeName', 'attributeType',
        'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
        'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
        'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
        'fill-opacity', 'fill-rule', 'font-family', 'font-size',
        'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
        'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
        'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
        'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
        'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
        'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
        'opacity', 'orient', 'origin', 'overline-position',
        'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
        'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
        'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
        'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
        'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
        'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
        'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
        'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
        'transform', 'type', 'u1', 'u2', 'underline-position',
        'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
        'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
        'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
        'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
        'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
        'y1', 'y2', 'zoomAndPan']

    # Attributes whose values are URIs and therefore need scheme checking.
    attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc',
        'xlink:href', 'xml:base']

    # SVG attributes that may contain url(...) references, which are scrubbed.
    svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
        'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
        'mask', 'stroke']

    # Elements whose xlink:href must be a local (fragment-only) reference.
    svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
        'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
        'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
        'set', 'use']

    acceptable_css_properties = ['azimuth', 'background-color',
        'border-bottom-color', 'border-collapse', 'border-color',
        'border-left-color', 'border-right-color', 'border-top-color', 'clear',
        'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
        'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
        'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
        'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
        'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
        'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
        'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
        'white-space', 'width']

    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
        'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
        'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
        'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
        'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
        'transparent', 'underline', 'white', 'yellow']

    acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
        'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
        'stroke-opacity']

    acceptable_protocols = [ 'ed2k', 'ftp', 'http', 'https', 'irc',
        'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
        'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
        'ssh', 'sftp', 'rtsp', 'afs' ]

    # subclasses may define their own versions of these constants
    allowed_elements = acceptable_elements + mathml_elements + svg_elements
    allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
    allowed_css_properties = acceptable_css_properties
    allowed_css_keywords = acceptable_css_keywords
    allowed_svg_properties = acceptable_svg_properties
    allowed_protocols = acceptable_protocols

    def sanitize_token(self, token):
        """Return a sanitized version of *token*, or None to drop it.

        Allowed elements pass through with their attributes filtered;
        disallowed elements are re-serialized as escaped character data;
        comments are dropped entirely (returns None); every other token
        type is returned unchanged.
        """
        # accommodate filters which use token_type differently
        token_type = token["type"]
        if token_type in tokenTypes:
            token_type = tokenTypes[token_type]

        if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
                          tokenTypes["EmptyTag"]):
            if token["name"] in self.allowed_elements:
                if "data" in token:
                    # Keep only whitelisted attributes.  Iterating the
                    # reversed pairs means the FIRST occurrence of a
                    # duplicated attribute wins.
                    attrs = dict([(name, val) for name, val in
                                  token["data"][::-1]
                                  if name in self.allowed_attributes])
                    for attr in self.attr_val_is_uri:
                        if attr not in attrs:
                            continue
                        # Strip control/whitespace characters that browsers
                        # ignore when parsing a scheme (e.g. "java\0script:").
                        val_unescaped = re.sub(r"[`\000-\040\177-\240\s]+", '',
                                               unescape(attrs[attr])).lower()
                        # remove replacement characters from unescaped characters
                        val_unescaped = val_unescaped.replace(u"\ufffd", "")
                        if (re.match(r"^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and
                            (val_unescaped.split(':')[0] not in
                             self.allowed_protocols)):
                            del attrs[attr]
                    for attr in self.svg_attr_val_allows_ref:
                        if attr in attrs:
                            # Scrub non-local url(...) references.
                            attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
                                                 ' ',
                                                 unescape(attrs[attr]))
                    if (token["name"] in self.svg_allow_local_href and
                        'xlink:href' in attrs and re.search(r'^\s*[^#\s].*',
                                                            attrs['xlink:href'])):
                        del attrs['xlink:href']
                    if 'style' in attrs:
                        attrs['style'] = self.sanitize_css(attrs['style'])
                    token["data"] = [[name, val] for name, val in attrs.items()]
                return token
            else:
                # Disallowed element: turn the whole tag into escaped text.
                if token_type == tokenTypes["EndTag"]:
                    token["data"] = "</%s>" % token["name"]
                elif token["data"]:
                    # Escape double quotes as well, so an attribute value
                    # cannot break out of the quoted serialization below.
                    attrs = ''.join([' %s="%s"' % (k, escape(v, {'"': '&quot;'}))
                                     for k, v in token["data"]])
                    token["data"] = "<%s%s>" % (token["name"], attrs)
                else:
                    token["data"] = "<%s>" % token["name"]
                if token.get("selfClosing"):
                    token["data"] = token["data"][:-1] + "/>"

                if token["type"] in tokenTypes:
                    token["type"] = "Characters"
                else:
                    token["type"] = tokenTypes["Characters"]

                del token["name"]
                return token
        elif token_type == tokenTypes["Comment"]:
            # Comments are stripped entirely (implicitly returns None).
            pass
        else:
            return token

    def sanitize_css(self, style):
        """Return *style* reduced to whitelisted CSS declarations.

        Returns '' if the input contains characters outside the safe set.
        """
        # disallow urls
        style = re.compile(r'url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)

        # gauntlet: reject the whole value on any unexpected character
        if not re.match(r"""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
            return ''
        if not re.match(r"^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
            return ''

        clean = []
        for prop, value in re.findall(r"([-\w]+)\s*:\s*([^:;]*)", style):
            if not value:
                continue
            if prop.lower() in self.allowed_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
                                                'padding']:
                # Shorthand properties: every keyword must be a whitelisted
                # keyword or look like a simple color / length / percentage.
                for keyword in value.split():
                    if keyword not in self.acceptable_css_keywords and \
                       not re.match(r"^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif prop.lower() in self.allowed_svg_properties:
                clean.append(prop + ': ' + value + ';')
        return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
    """Tokenizer that yields only sanitized tokens (see HTMLSanitizerMixin)."""

    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=False, lowercaseAttrName=False):
        # Case-matching defaults differ from the tokenizer's because only
        # lowercase html is emitted anyway.  This solution doesn't seem ideal...
        HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
                               lowercaseElementName, lowercaseAttrName)

    def __iter__(self):
        # Pass every token through the mixin's sanitizer; it returns None
        # for tokens that must be dropped (e.g. comments).
        for raw_token in HTMLTokenizer.__iter__(self):
            cleaned = self.sanitize_token(raw_token)
            if cleaned:
                yield cleaned
|
|
import ctypes
import mock
import unittest
import sys
import warnings
from contextlib import contextmanager
from nose import SkipTest
import nifpga
from nifpga.statuscheckedlibrary import (check_status,
NamedArgtype,
LibraryFunctionInfo,
LibraryNotFoundError,
StatusCheckedLibrary)
python_version = 3 if sys.version_info >= (3, 0) else 2
def raise_an_exception():
    """
    A helper for NiFpgaStatusExceptionTest: raises a FifoTimeoutError whose
    arguments cover several ctypes integer types plus a C string.
    """
    argument_names = ["session",
                      "fifo",
                      "data",
                      "number of elements",
                      "timeout ms",
                      "elements remaining",
                      "a bogus string argument"]
    function_args = (ctypes.c_int32(0x0000beef),       # session
                     ctypes.c_uint32(0x0000f1f0),      # fifo
                     ctypes.c_uint64(0x0000da7a),      # data
                     ctypes.c_size_t(0x100),           # number of elements
                     ctypes.c_size_t(0x200),           # timeout ms
                     ctypes.c_size_t(0x300),           # elements remaining
                     ctypes.c_char_p(b"I am a string"))  # bogus string arg
    raise nifpga.FifoTimeoutError(function_name="Dummy Function Name",
                                  argument_names=argument_names,
                                  function_args=function_args)
class NiFpgaStatusExceptionTest(unittest.TestCase):
    """Tests for the autogenerated NiFpga status warning/error classes."""

    def test_autogenerated_status_warning_and_error_classes_exist(self):
        # Bare attribute access raises AttributeError if codegen failed.
        nifpga.FifoTimeoutWarning
        nifpga.FifoTimeoutError

    def test_can_get_arguments_from_exception(self):
        # The raised exception must expose its code, code string, function
        # name and every named argument passed at construction time.
        try:
            raise_an_exception()
            self.fail("An exception should have been raised")
        except nifpga.FifoTimeoutError as e:
            self.assertEqual(-50400, e.get_code())
            self.assertEqual("FifoTimeout", e.get_code_string())
            self.assertEqual("Dummy Function Name", e.get_function_name())
            args = e.get_args()
            self.assertEqual(args["session"], 0x0000beef)
            self.assertEqual(args["fifo"], 0x0000f1f0)
            self.assertEqual(args["data"], 0x0000da7a)
            self.assertEqual(args["number of elements"], 0x100)
            self.assertEqual(args["timeout ms"], 0x200)
            self.assertEqual(args["elements remaining"], 0x300)
            self.assertEqual(args["a bogus string argument"], b"I am a string")
            # Spot check a couple different types of args in the
            # printed string that should be helpful for readability
            exception_str = str(e)
            # numbers in hex!
            self.assertIn("session: 0xbeef", exception_str)
            # strings have single quotes around them
            if python_version == 2:
                self.assertIn("a bogus string argument: 'I am a string'", exception_str)
            else:
                self.assertIn("a bogus string argument: b'I am a string'", exception_str)

    def test_status_exceptions_can_be_pickled_across_processes(self):
        # jobrunner runs the helper in a child process, so the exception must
        # survive pickling/unpickling across the process boundary.
        try:
            import jobrunner
        except ImportError:
            raise SkipTest("jobrunner not installed, skipping")
        runner = jobrunner.JobRunner(jobrunner.JobRunner.RUN_MODE_MULTIPROCESS,
                                     runnables=[raise_an_exception],
                                     auto_assert=False)
        result = runner.run()[0]
        self.assertTrue(result.exception_occured())
        self.assertEqual(str(result.err_type), str(nifpga.FifoTimeoutError))
        self.assertIn("session: 0xbeef", result.err_class)
        if python_version == 2:
            self.assertIn("a bogus string argument: 'I am a string'", result.err_class)
        else:
            self.assertIn("a bogus string argument: b'I am a string'", result.err_class)
@check_status(function_name="Fake Function Name", argument_names=["code"])
def return_a_checked_status(code):
    """
    A helper for CheckStatusTest: returns *code* unchanged so the
    @check_status decorator interprets it as an NiFpga status value.
    """
    return code
@contextmanager
def assert_warns(warning):
    """Context manager asserting its body emits exactly one warning of
    type *warning*."""
    with warnings.catch_warnings(record=True) as caught:
        # record everything, regardless of the ambient warning filters
        warnings.simplefilter("always")
        yield
    # verify the warning occurred
    assert len(caught) == 1
    assert isinstance(caught[0].message, warning)
class CheckStatusTest(unittest.TestCase):
    """Tests that @check_status maps status codes to exceptions/warnings."""

    def test_success(self):
        # Status 0 is success: no exception, no warning.
        return_a_checked_status(0)

    def test_get_known_error(self):
        # -50400 has a generated error class.
        with self.assertRaises(nifpga.FifoTimeoutError):
            return_a_checked_status(-50400)

    def test_get_known_warning(self):
        # +50400 has a generated warning class.
        with assert_warns(nifpga.FifoTimeoutWarning):
            return_a_checked_status(50400)

    def test_get_unknown_error(self):
        # Unrecognized negative codes fall back to UnknownError.
        with self.assertRaises(nifpga.UnknownError):
            return_a_checked_status(-1)

    def test_get_unknown_warning(self):
        # Unrecognized positive codes fall back to UnknownWarning.
        with assert_warns(nifpga.UnknownWarning):
            return_a_checked_status(1)
class StatusCheckedLibraryTestCRunTime(unittest.TestCase):
    """
    Since we can't load NiFpga on a dev machine unless we have all its
    dependencies installed (i.e. a bunch of NI software we don't want on
    a dev machine), we'll cheat and use the C runtime library and
    atoi. atoi doesn't really return a NiFpga_Status, but we can pretend.
    """
    def setUp(self):
        # Wrap libc's atoi so its return value gets treated as an NiFpga
        # status code by the status-checking machinery.
        self._c_runtime = StatusCheckedLibrary(
            "c",
            library_function_infos=[
                LibraryFunctionInfo(
                    pretty_name="c_atoi",
                    name_in_library="atoi",
                    named_argtypes=[
                        NamedArgtype("nptr", ctypes.c_char_p),
                    ])
            ])

    def test_success(self):
        # atoi(b"0") returns 0 (success); both attribute and item access
        # must reach the wrapped function.
        self._c_runtime.c_atoi(b"0")
        self._c_runtime["c_atoi"](b"0")

    def test_get_unknown_error(self):
        # atoi(b"-1") returns -1: an unrecognized negative status -> error.
        with self.assertRaises(nifpga.UnknownError):
            self._c_runtime.c_atoi(b"-1")

    def test_get_unknown_warning(self):
        # atoi(b"1") returns 1: an unrecognized positive status -> warning.
        with warnings.catch_warnings(record=True) as w:
            self._c_runtime.c_atoi(b"1")
            assert len(w) == 1
            warning = w[0].message
            # Make sure all this propagates into the warning.
            self.assertEqual(1, warning.get_code())
            self.assertEqual(b"1", warning.get_args()["nptr"])
            # These make the warning message readable
            self.assertIn("atoi", str(warning))
            if python_version == 2:
                self.assertIn("nptr: '1'", str(warning))
            else:
                self.assertIn("nptr: b'1'", str(warning))
class StatusCheckedLibraryTestFunctionDoesntExist(unittest.TestCase):
    """
    New versions of NiFpga will have new functions. We want the API to support
    old versions of NiFpga without erroring because it can't find certain symbols.
    So StatusCheckedLibrary will return VersionMismatchError for symbols it can't
    find.
    """
    def setUp(self):
        # Register a symbol that is guaranteed not to exist in libc.
        self._c_runtime = StatusCheckedLibrary(
            "c",
            library_function_infos=[
                LibraryFunctionInfo(
                    pretty_name="DoesntExist",
                    name_in_library="functionThatDoesntExist",
                    named_argtypes=[
                        NamedArgtype("nptr", ctypes.c_char_p),
                    ])
            ])

    def test_correct_error(self):
        # The missing symbol must raise VersionMismatchError for both
        # attribute-style and item-style lookups.
        with self.assertRaises(nifpga.VersionMismatchError):
            self._c_runtime.DoesntExist(b"0")
        with self.assertRaises(nifpga.VersionMismatchError):
            self._c_runtime["DoesntExist"](b"0")
class StatusCheckedLibraryTestMockedLibrary(unittest.TestCase):
    """
    Since we can't load NiFpga on a dev machine unless we have all its
    dependencies installed (i.e. a bunch of NI software we don't want on
    a dev machine), we'll monkey patch and use mocked libraries.
    """
    # so nose shows test names instead of docstrings
    def shortDescription(self):
        return None

    @mock.patch('nifpga.statuscheckedlibrary.ctypes.util.find_library')
    @mock.patch('nifpga.statuscheckedlibrary.ctypes.cdll')
    def setUp(self, mock_cdll, mock_find_library):
        """
        Setup up self._library so that self._library.AwesomeFunction(int, str)
        can be called, and the return value can be changed by setting
        self._mock_awesome_function.return_value.
        """
        mock_loaded_library = mock.Mock()
        mock_cdll.LoadLibrary.return_value = mock_loaded_library
        self._mock_awesome_function = mock.Mock()
        # real ctypes entry points carry __name__, so the mock needs one too
        self._mock_awesome_function.__name__ = "Entrypoint_AwesomeFunction"
        mock_loaded_library.Entrypoint_AwesomeFunction = self._mock_awesome_function
        self._library = StatusCheckedLibrary(
            library_name="CoolLibrary",
            library_function_infos=[
                LibraryFunctionInfo(
                    pretty_name="AwesomeFunction",
                    name_in_library="Entrypoint_AwesomeFunction",
                    named_argtypes=[NamedArgtype("some_integer", ctypes.c_uint32),
                                    NamedArgtype("some_string", ctypes.c_char_p)])
            ])

    def test_good_error_message_from_memory_full_error(self):
        """ Tests a good error message from a library call that fails.
        1. Correctly converts -52000 to NiFpgaMemoryFullError
        2. An integer arg gets printed as hex (easier to debug than decimal)
        3. A string arg gets printed with quotes surrounding it (so it's obviously a string)
        """
        self._mock_awesome_function.return_value = -52000
        try:
            self._library.AwesomeFunction(ctypes.c_uint32(33), ctypes.c_char_p(b"2"))
            self.fail("AwesomeFunction should have raised MemoryFull")
        except nifpga.MemoryFullError as e:
            # Py2 reprs the uint as a long, hence the trailing 'L' there.
            if python_version == 2:
                self.assertEqual(
                    "Error: MemoryFull (-52000) when calling 'Entrypoint_AwesomeFunction' with arguments:"
                    "\n\tsome_integer: 0x21L"
                    "\n\tsome_string: '2'", str(e))
            else:
                self.assertEqual(
                    "Error: MemoryFull (-52000) when calling 'Entrypoint_AwesomeFunction' with arguments:"
                    "\n\tsome_integer: 0x21"
                    "\n\tsome_string: b'2'", str(e))

    def test_success_when_library_function_is_success(self):
        """ Tests that a 0 status return value does not raise any errors. """
        self._mock_awesome_function.return_value = 0
        self._library.AwesomeFunction(ctypes.c_uint32(33), ctypes.c_char_p(b"2"))

    def test_good_error_message_if_wrong_number_of_arguments(self):
        """ Tests that calling a function with wrong number of arguments is error """
        try:
            self._library.AwesomeFunction(ctypes.c_uint32(33))
            self.fail("AwesomeFunction should have raised TypeError")
        except TypeError as e:
            self.assertEqual("Entrypoint_AwesomeFunction takes exactly 2 arguments (1 given)", str(e))
class NiFpgaTest(unittest.TestCase):
    """Smoke test for the _NiFpga library wrapper constructor."""

    def test_that_we_at_least_get_to_try_loading_library(self):
        # We can't do much without NiFpga and other NI software actually
        # being installed, but on a dev machine we can at least
        # catch a few more errors by trying to creating a NiFpga instance and
        # expect to fail when the library can't be found.
        try:
            nifpga.nifpga._NiFpga()
        except LibraryNotFoundError:
            pass
|
|
"""
Binary serialization
NPY format
==========
A simple format for saving numpy arrays to disk with the full
information about them.
The ``.npy`` format is the standard binary file format in NumPy for
persisting a *single* arbitrary NumPy array on disk. The format stores all
of the shape and dtype information necessary to reconstruct the array
correctly even on another machine with a different architecture.
The format is designed to be as simple as possible while achieving
its limited goals.
The ``.npz`` format is the standard format for persisting *multiple* NumPy
arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``
files, one for each array.
Capabilities
------------
- Can represent all NumPy arrays including nested record arrays and
object arrays.
- Represents the data in its native binary form.
- Supports Fortran-contiguous arrays directly.
- Stores all of the necessary information to reconstruct the array
including shape and dtype on a machine of a different
architecture. Both little-endian and big-endian arrays are
supported, and a file with little-endian numbers will yield
a little-endian array on any machine reading the file. The
types are described in terms of their actual sizes. For example,
if a machine with a 64-bit C "long int" writes out an array with
"long ints", a reading machine with 32-bit C "long ints" will yield
an array with 64-bit integers.
- Is straightforward to reverse engineer. Datasets often live longer than
the programs that created them. A competent developer should be
able to create a solution in their preferred programming language to
read most ``.npy`` files that they have been given without much
documentation.
- Allows memory-mapping of the data. See `open_memmap`.
- Can be read from a filelike stream object instead of an actual file.
- Stores object arrays, i.e. arrays containing elements that are arbitrary
Python objects. Files with object arrays are not to be mmapable, but
can be read and written to disk.
Limitations
-----------
- Arbitrary subclasses of numpy.ndarray are not completely preserved.
Subclasses will be accepted for writing, but only the array data will
be written out. A regular numpy.ndarray object will be created
upon reading the file.
.. warning::
Due to limitations in the interpretation of structured dtypes, dtypes
with fields with empty names will have the names replaced by 'f0', 'f1',
etc. Such arrays will not round-trip through the format entirely
accurately. The data is intact; only the field names will differ. We are
working on a fix for this. This fix will not require a change in the
file format. The arrays with such structures can still be saved and
restored, and the correct dtype may be restored by using the
``loadedarray.view(correct_dtype)`` method.
File extensions
---------------
We recommend using the ``.npy`` and ``.npz`` extensions for files saved
in this format. This is by no means a requirement; applications may wish
to use these file formats but use an extension specific to the
application. In the absence of an obvious alternative, however,
we suggest using ``.npy`` and ``.npz``.
Version numbering
-----------------
The version numbering of these formats is independent of NumPy version
numbering. If the format is upgraded, the code in `numpy.io` will still
be able to read and write Version 1.0 files.
Format Version 1.0
------------------
The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
The next 1 byte is an unsigned byte: the major version number of the file
format, e.g. ``\\x01``.
The next 1 byte is an unsigned byte: the minor version number of the file
format, e.g. ``\\x00``. Note: the version of the file format is not tied
to the version of the numpy package.
The next 2 bytes form a little-endian unsigned short int: the length of
the header data HEADER_LEN.
The next HEADER_LEN bytes form the header data describing the array's
format. It is an ASCII string which contains a Python literal expression
of a dictionary. It is terminated by a newline (``\\n``) and padded with
spaces (``\\x20``) to make the total of
``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
by 64 for alignment purposes.
The dictionary contains three keys:
"descr" : dtype.descr
An object that can be passed as an argument to the `numpy.dtype`
constructor to create the array's dtype.
"fortran_order" : bool
Whether the array data is Fortran-contiguous or not. Since
Fortran-contiguous arrays are a common form of non-C-contiguity,
we allow them to be written directly to disk for efficiency.
"shape" : tuple of int
The shape of the array.
For repeatability and readability, the dictionary keys are sorted in
alphabetic order. This is for convenience only. A writer SHOULD implement
this if possible. A reader MUST NOT depend on this.
Following the header comes the array data. If the dtype contains Python
objects (i.e. ``dtype.hasobject is True``), then the data is a Python
pickle of the array. Otherwise the data is the contiguous (either C-
or Fortran-, depending on ``fortran_order``) bytes of the array.
Consumers can figure out the number of bytes by multiplying the number
of elements given by the shape (noting that ``shape=()`` means there is
1 element) by ``dtype.itemsize``.
Format Version 2.0
------------------
The version 1.0 format only allowed the array header to have a total size of
65535 bytes. This can be exceeded by structured arrays with a large number of
columns. The version 2.0 format extends the header size to 4 GiB.
`numpy.save` will automatically save in 2.0 format if the data requires it,
else it will always use the more compatible 1.0 format.
The description of the fourth element of the header therefore has become:
"The next 4 bytes form a little-endian unsigned int: the length of the header
data HEADER_LEN."
Format Version 3.0
------------------
This version replaces the ASCII string (which in practice was latin1) with
a utf8-encoded string, so supports structured types with any unicode field
names.
Notes
-----
The ``.npy`` format, including motivation for creating it and a comparison of
alternatives, is described in the `"npy-format" NEP
<https://www.numpy.org/neps/nep-0001-npy-format.html>`_, however details have
evolved with time and this document is more current.
"""
from __future__ import division, absolute_import, print_function
import numpy
import sys
import io
import warnings
from numpy.lib.utils import safe_eval
from numpy.compat import (
isfileobj, long, os_fspath, pickle
)
# Nothing is re-exported on star-import; this module is internal API.
__all__ = []

# First 6 bytes of every .npy file, followed by a 2-byte version.
MAGIC_PREFIX = b'\x93NUMPY'
MAGIC_LEN = len(MAGIC_PREFIX) + 2
ARRAY_ALIGN = 64  # plausible values are powers of 2 between 16 and 4096
BUFFER_SIZE = 2**18  # size of buffer for reading npz files in bytes

# difference between version 1.0 and 2.0 is a 4 byte (I) header length
# instead of 2 bytes (H) allowing storage of large structured arrays
# Maps format version -> (struct format of the header-length field,
# text encoding of the header itself).
_header_size_info = {
    (1, 0): ('<H', 'latin1'),
    (2, 0): ('<I', 'latin1'),
    (3, 0): ('<I', 'utf8'),
}
def _check_version(version):
if version not in [(1, 0), (2, 0), (3, 0), None]:
msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
raise ValueError(msg % (version,))
def magic(major, minor):
    """Return the magic string for the given file format version.

    Parameters
    ----------
    major : int in [0, 255]
    minor : int in [0, 255]

    Returns
    -------
    magic : str

    Raises
    ------
    ValueError if the version cannot be formatted.
    """
    # Each version component must fit in a single unsigned byte.
    for name, value in (("major", major), ("minor", minor)):
        if not 0 <= value <= 255:
            raise ValueError("%s version must be 0 <= %s < 256" % (name, name))
    if sys.version_info[0] < 3:
        return MAGIC_PREFIX + chr(major) + chr(minor)
    else:
        return MAGIC_PREFIX + bytes([major, minor])
def read_magic(fp):
    """Read the magic string to get the version of the file format.

    Parameters
    ----------
    fp : filelike object

    Returns
    -------
    major : int
    minor : int
    """
    magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
    prefix, version_bytes = magic_str[:-2], magic_str[-2:]
    if prefix != MAGIC_PREFIX:
        msg = "the magic string is not correct; expected %r, got %r"
        raise ValueError(msg % (MAGIC_PREFIX, prefix))
    # On Python 2 the slice yields a str of two chars; on 3, two ints.
    if sys.version_info[0] < 3:
        major, minor = map(ord, version_bytes)
    else:
        major, minor = version_bytes
    return major, minor
def dtype_to_descr(dtype):
    """
    Get a serializable descriptor from the dtype.

    The .descr attribute of a dtype object cannot be round-tripped through
    the dtype() constructor. Simple types, like dtype('float32'), have
    a descr which looks like a record array with one field with '' as
    a name. The dtype() constructor interprets this as a request to give
    a default name. Instead, we construct descriptor that can be passed to
    dtype().

    Parameters
    ----------
    dtype : dtype
        The dtype of the array that will be written to disk.

    Returns
    -------
    descr : object
        An object that can be passed to `numpy.dtype()` in order to
        replicate the input dtype.
    """
    # Record dtypes round-trip via .descr.  XXX: parts of the record array
    # with an empty name, like padding bytes, still get fiddled with; this
    # needs to be fixed in the C implementation of dtype().
    # Simple dtypes use the .str typestring instead, since their .descr
    # looks like a one-field record with '' as the name.
    return dtype.descr if dtype.names is not None else dtype.str
def descr_to_dtype(descr):
    '''
    Reconstruct a dtype from a stored ``dtype.descr``-style descriptor.

    descr may be stored as dtype.descr, which is a list of
    (name, format, [shape]) tuples where format may be a str or a tuple.
    Offsets are not explicitly saved; instead, empty fields with
    name, format == '', '|Vn' are added as padding.
    This function reverses the process, eliminating the empty padding fields.
    '''
    if isinstance(descr, str):
        # Plain typestring: no padding removal needed.
        return numpy.dtype(descr)
    elif isinstance(descr, tuple):
        # Subarray type; descr[1] is always the shape.
        base = descr_to_dtype(descr[0])
        return numpy.dtype((base, descr[1]))

    # A list of fields: walk them, tracking each field's byte offset and
    # skipping the synthetic '' / '|Vn' padding entries.
    entries = []
    byte_offset = 0
    for field in descr:
        if len(field) == 2:
            fname, fmt = field
            fdtype = descr_to_dtype(fmt)
        else:
            fname, fmt, fshape = field
            fdtype = numpy.dtype((descr_to_dtype(fmt), fshape))
        # Ignore padding bytes, which will be void bytes with '' as name
        # (once support for blank names is removed, only "fname == ''" is needed)
        is_pad = (fname == '' and fdtype.type is numpy.void and
                  fdtype.names is None)
        if not is_pad:
            entries.append((fname, fdtype, byte_offset))
        byte_offset += fdtype.itemsize

    names, formats, offsets = zip(*entries)
    # names may be (title, names) tuples
    nametups = (n if isinstance(n, tuple) else (None, n) for n in names)
    titles, names = zip(*nametups)
    return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,
                        'offsets': offsets, 'itemsize': byte_offset})
def header_data_from_array_1_0(array):
    """Get the dictionary of header metadata from a numpy.ndarray.

    Parameters
    ----------
    array : numpy.ndarray

    Returns
    -------
    d : dict
        This has the appropriate entries for writing its string representation
        to the header of the file.
    """
    # Note that we must test for C_CONTIGUOUS first because a 1-D array is
    # both C_CONTIGUOUS and F_CONTIGUOUS.  Totally non-contiguous data will
    # have to be made C-contiguous before writing, so it records False.
    if array.flags.c_contiguous:
        fortran_order = False
    elif array.flags.f_contiguous:
        fortran_order = True
    else:
        fortran_order = False
    return {'shape': array.shape,
            'fortran_order': fortran_order,
            'descr': dtype_to_descr(array.dtype)}
def _wrap_header(header, version):
    """
    Takes a stringified header, and attaches the prefix and padding to it
    """
    import struct
    assert version is not None
    fmt, encoding = _header_size_info[version]
    if not isinstance(header, bytes):  # always true on python 3
        header = header.encode(encoding)
    # +1 accounts for the trailing newline appended below.
    hlen = len(header) + 1
    padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
    try:
        prefix = magic(*version) + struct.pack(fmt, hlen + padlen)
    except struct.error:
        msg = "Header length {} too big for version={}".format(hlen, version)
        raise ValueError(msg)

    # Pad the header with spaces and a final newline such that the magic
    # string, the header-length short and the header are aligned on a
    # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
    # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
    # offset must be page-aligned (i.e. the beginning of the file).
    return prefix + header + b' ' * padlen + b'\n'
def _wrap_header_guess_version(header):
    """
    Like `_wrap_header`, but chooses an appropriate version given the contents

    Tries format 1.0 first, then 2.0 (which allows larger headers), then
    3.0 (which allows utf8 headers), warning whenever a newer-than-1.0
    format had to be used.
    """
    try:
        return _wrap_header(header, (1, 0))
    except ValueError:
        # Header too long for the 16-bit length field of version 1.0.
        pass

    try:
        ret = _wrap_header(header, (2, 0))
    except UnicodeEncodeError:
        # Header contains non-latin1 characters; fall through to 3.0.
        pass
    else:
        # BUG FIX: the two adjacent string literals previously concatenated
        # to "...can only beread by..." — a separating space was missing.
        warnings.warn("Stored array in format 2.0. It can only be "
                      "read by NumPy >= 1.9", UserWarning, stacklevel=2)
        return ret

    header = _wrap_header(header, (3, 0))
    warnings.warn("Stored array in format 3.0. It can only be "
                  "read by NumPy >= 1.17", UserWarning, stacklevel=2)
    return header
def _write_array_header(fp, d, version=None):
    """ Write the header for an array and returns the version used

    Parameters
    ----------
    fp : filelike object
    d : dict
        This has the appropriate entries for writing its string representation
        to the header of the file.
    version: tuple or None
        None means use oldest that works
        explicit version will raise a ValueError if the format does not
        allow saving this data.  Default: None
    """
    # Serialize the dict with repr'd values so it can be safe_eval'd back
    # when the file is read.
    pieces = ["{"]
    for key in sorted(d):
        pieces.append("'%s': %s, " % (key, repr(d[key])))
    pieces.append("}")
    header = _filter_header("".join(pieces))
    if version is None:
        header = _wrap_header_guess_version(header)
    else:
        header = _wrap_header(header, version)
    fp.write(header)
def write_array_header_1_0(fp, d):
    """ Write the header for an array using the 1.0 format.

    Parameters
    ----------
    fp : filelike object
    d : dict
        This has the appropriate entries for writing its string
        representation to the header of the file.
    """
    # Thin wrapper pinning the format version to 1.0.
    _write_array_header(fp, d, version=(1, 0))
def write_array_header_2_0(fp, d):
    """ Write the header for an array using the 2.0 format.
    The 2.0 format allows storing very large structured arrays.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    fp : filelike object
    d : dict
        This has the appropriate entries for writing its string
        representation to the header of the file.
    """
    # Thin wrapper pinning the format version to 2.0.
    _write_array_header(fp, d, version=(2, 0))
def read_array_header_1_0(fp):
    """
    Read an array header from a filelike object using the 1.0 file format
    version.

    This will leave the file object located just after the header.

    Parameters
    ----------
    fp : filelike object
        A file object or something with a `.read()` method like a file.

    Returns
    -------
    shape : tuple of int
        The shape of the array.
    fortran_order : bool
        The array data will be written out directly if it is either
        C-contiguous or Fortran-contiguous. Otherwise, it will be made
        contiguous before writing it out.
    dtype : dtype
        The dtype of the file's data.

    Raises
    ------
    ValueError
        If the data is invalid.
    """
    # Delegate to the shared implementation with the version pinned.
    return _read_array_header(fp, (1, 0))
def read_array_header_2_0(fp):
    """
    Read an array header from a filelike object using the 2.0 file format
    version.

    This will leave the file object located just after the header.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    fp : filelike object
        A file object or something with a `.read()` method like a file.

    Returns
    -------
    shape : tuple of int
        The shape of the array.
    fortran_order : bool
        The array data will be written out directly if it is either
        C-contiguous or Fortran-contiguous. Otherwise, it will be made
        contiguous before writing it out.
    dtype : dtype
        The dtype of the file's data.

    Raises
    ------
    ValueError
        If the data is invalid.
    """
    # Delegate to the shared implementation with the version pinned.
    return _read_array_header(fp, (2, 0))
def _filter_header(s):
"""Clean up 'L' in npz header ints.
Cleans up the 'L' in strings representing integers. Needed to allow npz
headers produced in Python2 to be read in Python3.
Parameters
----------
s : string
Npy file header.
Returns
-------
header : str
Cleaned up header.
"""
import tokenize
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
tokens = []
last_token_was_number = False
# adding newline as python 2.7.5 workaround
string = s + "\n"
for token in tokenize.generate_tokens(StringIO(string).readline):
token_type = token[0]
token_string = token[1]
if (last_token_was_number and
token_type == tokenize.NAME and
token_string == "L"):
continue
else:
tokens.append(token)
last_token_was_number = (token_type == tokenize.NUMBER)
# removing newline (see above) as python 2.7.5 workaround
return tokenize.untokenize(tokens)[:-1]
def _read_array_header(fp, version):
    """
    see read_array_header_1_0

    Reads and validates the header dict for the given format `version`,
    leaving `fp` positioned at the start of the array data.  Returns
    ``(shape, fortran_order, dtype)`` and raises ValueError on any
    malformed or unexpected header content.
    """
    # Read an unsigned, little-endian short int which has the length of the
    # header.
    import struct
    hinfo = _header_size_info.get(version)
    if hinfo is None:
        raise ValueError("Invalid version {!r}".format(version))
    # Format character for the length field and the header text encoding
    # differ per format version (2.0 widens the field, 3.0 uses utf8).
    hlength_type, encoding = hinfo

    hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
    header_length = struct.unpack(hlength_type, hlength_str)[0]
    header = _read_bytes(fp, header_length, "array header")
    header = header.decode(encoding)

    # The header is a pretty-printed string representation of a literal
    # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte
    # boundary. The keys are strings.
    #   "shape" : tuple of int
    #   "fortran_order" : bool
    #   "descr" : dtype.descr
    # Strip Python 2 'L' long-suffixes before evaluating.
    header = _filter_header(header)
    try:
        d = safe_eval(header)
    except SyntaxError as e:
        msg = "Cannot parse header: {!r}\nException: {!r}"
        raise ValueError(msg.format(header, e))
    if not isinstance(d, dict):
        msg = "Header is not a dictionary: {!r}"
        raise ValueError(msg.format(d))
    # Exactly these three keys must be present — nothing more, nothing less.
    keys = sorted(d.keys())
    if keys != ['descr', 'fortran_order', 'shape']:
        msg = "Header does not contain the correct keys: {!r}"
        raise ValueError(msg.format(keys))

    # Sanity-check the values.
    # NOTE(review): `long` is presumably imported from numpy.compat at the
    # top of the file (Python 2 compatibility) — not visible in this chunk.
    if (not isinstance(d['shape'], tuple) or
        not numpy.all([isinstance(x, (int, long)) for x in d['shape']])):
        msg = "shape is not valid: {!r}"
        raise ValueError(msg.format(d['shape']))
    if not isinstance(d['fortran_order'], bool):
        msg = "fortran_order is not a valid bool: {!r}"
        raise ValueError(msg.format(d['fortran_order']))
    try:
        dtype = descr_to_dtype(d['descr'])
    except TypeError as e:
        # Re-raise as ValueError so callers see one consistent error type.
        msg = "descr is not a valid dtype descriptor: {!r}"
        raise ValueError(msg.format(d['descr']))

    return d['shape'], d['fortran_order'], dtype
def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
    """
    Write an array to an NPY file, including a header.

    If the array is neither C-contiguous nor Fortran-contiguous AND the
    file_like object is not a real file object, this function will have to
    copy data in memory.

    Parameters
    ----------
    fp : file_like object
        An open, writable file object, or similar object with a
        ``.write()`` method.
    array : ndarray
        The array to write to disk.
    version : (int, int) or None, optional
        The version number of the format. None means use the oldest
        supported version that is able to store the data.  Default: None
    allow_pickle : bool, optional
        Whether to allow writing pickled data. Default: True
    pickle_kwargs : dict, optional
        Additional keyword arguments to pass to pickle.dump, excluding
        'protocol'. These are only useful when pickling objects in object
        arrays on Python 3 to Python 2 compatible format.

    Raises
    ------
    ValueError
        If the array cannot be persisted. This includes the case of
        allow_pickle=False and array being an object array.
    Various other errors
        If the array contains Python objects as part of its dtype, the
        process of pickling them may raise various errors if the objects
        are not picklable.
    """
    _check_version(version)
    _write_array_header(fp, header_data_from_array_1_0(array), version)

    if array.itemsize == 0:
        buffersize = 0
    else:
        # Set buffer size to 16 MiB to hide the Python loop overhead.
        buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)

    if array.dtype.hasobject:
        # We contain Python objects so we cannot write out the data
        # directly.  Instead, we will pickle it out
        if not allow_pickle:
            raise ValueError("Object arrays cannot be saved when "
                             "allow_pickle=False")
        if pickle_kwargs is None:
            pickle_kwargs = {}
        # NOTE(review): protocol 3 pickles cannot be read by Python 2 —
        # presumably deliberate for this version of the format module.
        pickle.dump(array, fp, protocol=3, **pickle_kwargs)
    elif array.flags.f_contiguous and not array.flags.c_contiguous:
        # Fortran-ordered data: a real file can stream the transpose
        # directly; other file-likes are written in buffered chunks.
        if isfileobj(fp):
            array.T.tofile(fp)
        else:
            for chunk in numpy.nditer(
                    array, flags=['external_loop', 'buffered', 'zerosize_ok'],
                    buffersize=buffersize, order='F'):
                fp.write(chunk.tobytes('C'))
    else:
        # C-contiguous (or non-contiguous, which nditer copies to C order).
        if isfileobj(fp):
            array.tofile(fp)
        else:
            for chunk in numpy.nditer(
                    array, flags=['external_loop', 'buffered', 'zerosize_ok'],
                    buffersize=buffersize, order='C'):
                fp.write(chunk.tobytes('C'))
def read_array(fp, allow_pickle=False, pickle_kwargs=None):
    """
    Read an array from an NPY file.

    Parameters
    ----------
    fp : file_like object
        If this is not a real file object, then this may take extra memory
        and time.
    allow_pickle : bool, optional
        Whether to allow writing pickled data. Default: False

        .. versionchanged:: 1.16.3
            Made default False in response to CVE-2019-6446.

    pickle_kwargs : dict
        Additional keyword arguments to pass to pickle.load. These are only
        useful when loading object arrays saved on Python 2 when using
        Python 3.

    Returns
    -------
    array : ndarray
        The array from the data on disk.

    Raises
    ------
    ValueError
        If the data is invalid, or allow_pickle=False and the file contains
        an object array.
    """
    version = read_magic(fp)
    _check_version(version)
    shape, fortran_order, dtype = _read_array_header(fp, version)
    if len(shape) == 0:
        # 0-d array: exactly one element.
        count = 1
    else:
        # int64 accumulator avoids overflow for very large shapes.
        count = numpy.multiply.reduce(shape, dtype=numpy.int64)

    # Now read the actual data.
    if dtype.hasobject:
        # The array contained Python objects. We need to unpickle the data.
        if not allow_pickle:
            raise ValueError("Object arrays cannot be loaded when "
                             "allow_pickle=False")
        if pickle_kwargs is None:
            pickle_kwargs = {}
        try:
            array = pickle.load(fp, **pickle_kwargs)
        except UnicodeError as err:
            if sys.version_info[0] >= 3:
                # Friendlier error message
                raise UnicodeError("Unpickling a python object failed: %r\n"
                                   "You may need to pass the encoding= option "
                                   "to numpy.load" % (err,))
            raise
    else:
        if isfileobj(fp):
            # We can use the fast fromfile() function.
            array = numpy.fromfile(fp, dtype=dtype, count=count)
        else:
            # This is not a real file. We have to read it the
            # memory-intensive way.
            # crc32 module fails on reads greater than 2 ** 32 bytes,
            # breaking large reads from gzip streams. Chunk reads to
            # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
            # of the read. In non-chunked case count < max_read_count, so
            # only one read is performed.

            # Use np.ndarray instead of np.empty since the latter does
            # not correctly instantiate zero-width string dtypes; see
            # https://github.com/numpy/numpy/pull/6430
            array = numpy.ndarray(count, dtype=dtype)

            if dtype.itemsize > 0:
                # If dtype.itemsize == 0 then there's nothing more to read
                max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)

                for i in range(0, count, max_read_count):
                    read_count = min(max_read_count, count - i)
                    read_size = int(read_count * dtype.itemsize)
                    data = _read_bytes(fp, read_size, "array data")
                    array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
                                                             count=read_count)

        if fortran_order:
            # Data was stored column-major: load flat, reverse the shape,
            # then transpose to recover the logical layout without copying.
            array.shape = shape[::-1]
            array = array.transpose()
        else:
            array.shape = shape

    return array
def open_memmap(filename, mode='r+', dtype=None, shape=None,
                fortran_order=False, version=None):
    """
    Open a .npy file as a memory-mapped array.

    This may be used to read an existing file or create a new one.

    Parameters
    ----------
    filename : str or path-like
        The name of the file on disk.  This may *not* be a file-like
        object.
    mode : str, optional
        The mode in which to open the file; the default is 'r+'.  In
        addition to the standard file modes, 'c' is also accepted to mean
        "copy on write."  See `memmap` for the available mode strings.
    dtype : data-type, optional
        The data type of the array if we are creating a new file in "write"
        mode, if not, `dtype` is ignored.  The default value is None, which
        results in a data-type of `float64`.
    shape : tuple of int
        The shape of the array if we are creating a new file in "write"
        mode, in which case this parameter is required.  Otherwise, this
        parameter is ignored and is thus optional.
    fortran_order : bool, optional
        Whether the array should be Fortran-contiguous (True) or
        C-contiguous (False, the default) if we are creating a new file in
        "write" mode.
    version : tuple of int (major, minor) or None
        If the mode is a "write" mode, then this is the version of the file
        format used to create the file.  None means use the oldest
        supported version that is able to store the data.  Default: None

    Returns
    -------
    marray : memmap
        The memory-mapped array.

    Raises
    ------
    ValueError
        If the data or the mode is invalid.
    IOError
        If the file is not found or cannot be opened correctly.

    See Also
    --------
    memmap
    """
    if isfileobj(filename):
        raise ValueError("Filename must be a string or a path-like object."
                         " Memmap cannot use existing file handles.")

    if 'w' in mode:
        # We are creating the file, not reading it.
        # Check if we ought to create the file.
        _check_version(version)
        # Ensure that the given dtype is an authentic dtype object rather
        # than just something that can be interpreted as a dtype object.
        dtype = numpy.dtype(dtype)
        if dtype.hasobject:
            # Object pointers are process-local; they cannot be mapped.
            msg = "Array can't be memory-mapped: Python objects in dtype."
            raise ValueError(msg)
        d = dict(
            descr=dtype_to_descr(dtype),
            fortran_order=fortran_order,
            shape=shape,
        )
        # If we got here, then it should be safe to create the file.
        with open(os_fspath(filename), mode+'b') as fp:
            _write_array_header(fp, d, version)
            # Data begins right after the header we just wrote.
            offset = fp.tell()
    else:
        # Read the header of the file first.
        with open(os_fspath(filename), 'rb') as fp:
            version = read_magic(fp)
            _check_version(version)

            shape, fortran_order, dtype = _read_array_header(fp, version)
            if dtype.hasobject:
                msg = "Array can't be memory-mapped: Python objects in dtype."
                raise ValueError(msg)
            offset = fp.tell()

    if fortran_order:
        order = 'F'
    else:
        order = 'C'

    # We need to change a write-only mode to a read-write mode since we've
    # already written data to the file.
    if mode == 'w+':
        mode = 'r+'

    marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
                          mode=mode, offset=offset)

    return marray
def _read_bytes(fp, size, error_template="ran out of data"):
"""
Read from file-like object until size bytes are read.
Raises ValueError if not EOF is encountered before size bytes are read.
Non-blocking objects only supported if they derive from io objects.
Required as e.g. ZipExtFile in python 2.6 can return less data than
requested.
"""
data = bytes()
while True:
# io files (default in python3) return None or raise on
# would-block, python2 file will truncate, probably nothing can be
# done about that. note that regular files can't be non-blocking
try:
r = fp.read(size - len(data))
data += r
if len(r) == 0 or len(data) == size:
break
except io.BlockingIOError:
pass
if len(data) != size:
msg = "EOF: reading %s, expected %d bytes got %d"
raise ValueError(msg % (error_template, size, len(data)))
else:
return data
|
|
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.identity_toolkit import tenant_pb2
from google3.cloud.graphite.mmv2.services.google.identity_toolkit import tenant_pb2_grpc
from typing import List
class Tenant(object):
    """Declarative wrapper around an Identity Toolkit (alpha) Tenant resource.

    Each attribute mirrors a field of the ``IdentitytoolkitAlphaTenant``
    proto.  ``apply()`` reconciles the desired state with the service and
    refreshes the local fields from the response.  This is DCL-generated
    marshalling code; field names must match the generated protos exactly.
    """

    def __init__(
        self,
        name: str = None,
        display_name: str = None,
        allow_password_signup: bool = None,
        enable_email_link_signin: bool = None,
        disable_auth: bool = None,
        enable_anonymous_user: bool = None,
        mfa_config: dict = None,
        test_phone_numbers: dict = None,
        project: str = None,
        service_account_file: str = "",
    ):
        # Opens the gRPC channel as a side effect of construction.
        channel.initialize()
        self.name = name
        self.display_name = display_name
        self.allow_password_signup = allow_password_signup
        self.enable_email_link_signin = enable_email_link_signin
        self.disable_auth = disable_auth
        self.enable_anonymous_user = enable_anonymous_user
        self.mfa_config = mfa_config
        self.test_phone_numbers = test_phone_numbers
        self.project = project
        self.service_account_file = service_account_file

    def apply(self):
        """Create or update the tenant, then refresh fields from the response."""
        stub = tenant_pb2_grpc.IdentitytoolkitAlphaTenantServiceStub(channel.Channel())
        request = tenant_pb2.ApplyIdentitytoolkitAlphaTenantRequest()
        # Only fields whose converted value is truthy are set on the request;
        # unset proto3 scalars keep their defaults.
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.display_name):
            request.resource.display_name = Primitive.to_proto(self.display_name)

        if Primitive.to_proto(self.allow_password_signup):
            request.resource.allow_password_signup = Primitive.to_proto(
                self.allow_password_signup
            )

        if Primitive.to_proto(self.enable_email_link_signin):
            request.resource.enable_email_link_signin = Primitive.to_proto(
                self.enable_email_link_signin
            )

        if Primitive.to_proto(self.disable_auth):
            request.resource.disable_auth = Primitive.to_proto(self.disable_auth)

        if Primitive.to_proto(self.enable_anonymous_user):
            request.resource.enable_anonymous_user = Primitive.to_proto(
                self.enable_anonymous_user
            )

        # Message fields must be either copied in or explicitly cleared.
        if TenantMfaConfig.to_proto(self.mfa_config):
            request.resource.mfa_config.CopyFrom(
                TenantMfaConfig.to_proto(self.mfa_config)
            )
        else:
            request.resource.ClearField("mfa_config")
        if Primitive.to_proto(self.test_phone_numbers):
            request.resource.test_phone_numbers = Primitive.to_proto(
                self.test_phone_numbers
            )

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        request.service_account_file = self.service_account_file

        response = stub.ApplyIdentitytoolkitAlphaTenant(request)
        # Mirror the server's view of the resource back onto this object.
        self.name = Primitive.from_proto(response.name)
        self.display_name = Primitive.from_proto(response.display_name)
        self.allow_password_signup = Primitive.from_proto(
            response.allow_password_signup
        )
        self.enable_email_link_signin = Primitive.from_proto(
            response.enable_email_link_signin
        )
        self.disable_auth = Primitive.from_proto(response.disable_auth)
        self.enable_anonymous_user = Primitive.from_proto(
            response.enable_anonymous_user
        )
        self.mfa_config = TenantMfaConfig.from_proto(response.mfa_config)
        self.test_phone_numbers = Primitive.from_proto(response.test_phone_numbers)
        self.project = Primitive.from_proto(response.project)

    def delete(self):
        """Delete the tenant identified by this object's current fields."""
        stub = tenant_pb2_grpc.IdentitytoolkitAlphaTenantServiceStub(channel.Channel())
        request = tenant_pb2.DeleteIdentitytoolkitAlphaTenantRequest()
        request.service_account_file = self.service_account_file
        # Same conditional field marshalling as apply().
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.display_name):
            request.resource.display_name = Primitive.to_proto(self.display_name)

        if Primitive.to_proto(self.allow_password_signup):
            request.resource.allow_password_signup = Primitive.to_proto(
                self.allow_password_signup
            )

        if Primitive.to_proto(self.enable_email_link_signin):
            request.resource.enable_email_link_signin = Primitive.to_proto(
                self.enable_email_link_signin
            )

        if Primitive.to_proto(self.disable_auth):
            request.resource.disable_auth = Primitive.to_proto(self.disable_auth)

        if Primitive.to_proto(self.enable_anonymous_user):
            request.resource.enable_anonymous_user = Primitive.to_proto(
                self.enable_anonymous_user
            )

        if TenantMfaConfig.to_proto(self.mfa_config):
            request.resource.mfa_config.CopyFrom(
                TenantMfaConfig.to_proto(self.mfa_config)
            )
        else:
            request.resource.ClearField("mfa_config")
        if Primitive.to_proto(self.test_phone_numbers):
            request.resource.test_phone_numbers = Primitive.to_proto(
                self.test_phone_numbers
            )

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        response = stub.DeleteIdentitytoolkitAlphaTenant(request)

    @classmethod
    def list(self, project, service_account_file=""):
        """Return all tenants in `project` (note: generated code names the
        classmethod's first parameter `self` rather than `cls`)."""
        stub = tenant_pb2_grpc.IdentitytoolkitAlphaTenantServiceStub(channel.Channel())
        request = tenant_pb2.ListIdentitytoolkitAlphaTenantRequest()
        request.service_account_file = service_account_file
        request.Project = project

        return stub.ListIdentitytoolkitAlphaTenant(request).items

    def to_proto(self):
        """Convert this wrapper into an IdentitytoolkitAlphaTenant proto."""
        resource = tenant_pb2.IdentitytoolkitAlphaTenant()
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.display_name):
            resource.display_name = Primitive.to_proto(self.display_name)
        if Primitive.to_proto(self.allow_password_signup):
            resource.allow_password_signup = Primitive.to_proto(
                self.allow_password_signup
            )
        if Primitive.to_proto(self.enable_email_link_signin):
            resource.enable_email_link_signin = Primitive.to_proto(
                self.enable_email_link_signin
            )
        if Primitive.to_proto(self.disable_auth):
            resource.disable_auth = Primitive.to_proto(self.disable_auth)
        if Primitive.to_proto(self.enable_anonymous_user):
            resource.enable_anonymous_user = Primitive.to_proto(
                self.enable_anonymous_user
            )
        if TenantMfaConfig.to_proto(self.mfa_config):
            resource.mfa_config.CopyFrom(TenantMfaConfig.to_proto(self.mfa_config))
        else:
            resource.ClearField("mfa_config")
        if Primitive.to_proto(self.test_phone_numbers):
            resource.test_phone_numbers = Primitive.to_proto(self.test_phone_numbers)
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        return resource
class TenantMfaConfig(object):
    """Wrapper for a tenant's MFA configuration (state + enabled providers)."""

    def __init__(self, state: str = None, enabled_providers: list = None):
        self.state = state
        self.enabled_providers = enabled_providers

    @classmethod
    def to_proto(self, resource):
        """Convert a TenantMfaConfig wrapper into its proto message (or None)."""
        if not resource:
            return None
        res = tenant_pb2.IdentitytoolkitAlphaTenantMfaConfig()
        state_value = TenantMfaConfigStateEnum.to_proto(resource.state)
        if state_value:
            res.state = state_value
        providers = TenantMfaConfigEnabledProvidersEnumArray.to_proto(
            resource.enabled_providers
        )
        if providers:
            res.enabled_providers.extend(providers)
        return res

    @classmethod
    def from_proto(self, resource):
        """Convert a proto message back into a TenantMfaConfig wrapper (or None)."""
        if not resource:
            return None
        return TenantMfaConfig(
            state=TenantMfaConfigStateEnum.from_proto(resource.state),
            enabled_providers=TenantMfaConfigEnabledProvidersEnumArray.from_proto(
                resource.enabled_providers
            ),
        )
class TenantMfaConfigArray(object):
    """Converts lists of TenantMfaConfig wrappers to/from proto messages."""

    @classmethod
    def to_proto(self, resources):
        # Falsy inputs (None / empty list) pass through unchanged.
        if not resources:
            return resources
        return [TenantMfaConfig.to_proto(item) for item in resources]

    @classmethod
    def from_proto(self, resources):
        return [TenantMfaConfig.from_proto(item) for item in resources]
class TenantMfaConfigStateEnum(object):
    """Maps short MFA-state enum names to/from fully-prefixed proto values."""

    # Proto enum value names all carry this prefix; the wrapper API uses
    # only the short suffix.
    _PREFIX = "IdentitytoolkitAlphaTenantMfaConfigStateEnum"

    @classmethod
    def to_proto(self, resource):
        """Translate a short enum name to its proto numeric value."""
        if not resource:
            # Pass falsy values (None / "") through untouched.
            return resource
        return tenant_pb2.IdentitytoolkitAlphaTenantMfaConfigStateEnum.Value(
            "%s%s" % (self._PREFIX, resource)
        )

    @classmethod
    def from_proto(self, resource):
        """Translate a proto numeric value back to its short enum name."""
        if not resource:
            return resource
        full_name = tenant_pb2.IdentitytoolkitAlphaTenantMfaConfigStateEnum.Name(
            resource
        )
        return full_name[len(self._PREFIX):]
class TenantMfaConfigEnabledProvidersEnum(object):
    """Maps short provider enum names to/from fully-prefixed proto values."""

    # Proto enum value names all carry this prefix; the wrapper API uses
    # only the short suffix.
    _PREFIX = "IdentitytoolkitAlphaTenantMfaConfigEnabledProvidersEnum"

    @classmethod
    def to_proto(self, resource):
        """Translate a short enum name to its proto numeric value."""
        if not resource:
            # Pass falsy values (None / "") through untouched.
            return resource
        return tenant_pb2.IdentitytoolkitAlphaTenantMfaConfigEnabledProvidersEnum.Value(
            "%s%s" % (self._PREFIX, resource)
        )

    @classmethod
    def from_proto(self, resource):
        """Translate a proto numeric value back to its short enum name."""
        if not resource:
            return resource
        full_name = tenant_pb2.IdentitytoolkitAlphaTenantMfaConfigEnabledProvidersEnum.Name(
            resource
        )
        return full_name[len(self._PREFIX):]
class Primitive(object):
    """Identity conversions for primitive fields, normalizing falsy values."""

    @classmethod
    def to_proto(self, s):
        """Return s unchanged, or "" when s is falsy (proto3 scalar default)."""
        return s if s else ""

    @classmethod
    def from_proto(self, s):
        """Protos already hold primitive values; return unchanged."""
        return s
|
|
# Copyright (c) 2009, 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import errno
import os
import sys
import ovs.daemon
import ovs.json
import ovs.jsonrpc
import ovs.poller
import ovs.stream
def handle_rpc(rpc, msg):
    """Service one incoming JSON-RPC message.

    Answers "echo" requests, honors the "shutdown" notification, and reports
    anything else as an error on stderr.  Returns True once the server
    should stop accepting new work.
    """
    done = False
    reply = None

    if msg.type == ovs.jsonrpc.Message.T_REQUEST:
        if msg.method == "echo":
            # Echo the parameters straight back to the caller.
            reply = ovs.jsonrpc.Message.create_reply(msg.params, msg.id)
        else:
            reply = ovs.jsonrpc.Message.create_error(
                {"error": "unknown method"}, msg.id)
            sys.stderr.write("unknown request %s" % msg.method)
    elif msg.type == ovs.jsonrpc.Message.T_NOTIFY:
        if msg.method == "shutdown":
            done = True
        else:
            rpc.error(errno.ENOTTY)
            sys.stderr.write("unknown notification %s" % msg.method)
    else:
        # We never sent a request, so any reply/error here is unsolicited.
        rpc.error(errno.EPROTO)
        sys.stderr.write("unsolicited JSON-RPC reply or error\n")

    if reply:
        rpc.send(reply)
    return done
def do_listen(name):
    """Listen on LOCAL `name`, serving JSON-RPC connections until shutdown.

    Daemonizes once the listening socket is established, then runs a poll
    loop: accept new connections, service existing ones via handle_rpc(),
    and exit after a "shutdown" notification has been seen and every
    connection has drained.
    """
    error, pstream = ovs.stream.PassiveStream.open(name)
    if error:
        sys.stderr.write("could not listen on \"%s\": %s\n"
                         % (name, os.strerror(error)))
        sys.exit(1)

    # Fork into the background only after the listen socket exists, so the
    # parent's exit signals readiness.
    ovs.daemon.daemonize()

    rpcs = []
    done = False
    while True:
        # Accept new connections.
        error, stream = pstream.accept()
        if stream:
            rpcs.append(ovs.jsonrpc.Connection(stream))
        elif error != errno.EAGAIN:
            sys.stderr.write("PassiveStream.accept() failed\n")
            sys.exit(1)

        # Service existing connections.
        dead_rpcs = []
        for rpc in rpcs:
            rpc.run()

            error = 0
            if not rpc.get_backlog():
                # No queued outbound data, so it is safe to receive.
                error, msg = rpc.recv()
                if not error:
                    if handle_rpc(rpc, msg):
                        done = True

            error = rpc.get_status()
            if error:
                rpc.close()
                dead_rpcs.append(rpc)
        rpcs = [rpc for rpc in rpcs if not rpc in dead_rpcs]

        if done and not rpcs:
            break

        # Sleep until any stream has activity again.
        poller = ovs.poller.Poller()
        pstream.wait(poller)
        for rpc in rpcs:
            rpc.wait(poller)
            if not rpc.get_backlog():
                rpc.recv_wait(poller)
        poller.block()
    pstream.close()
def do_request(name, method, params_string):
    """Send a JSON-RPC request to REMOTE `name` and print the reply.

    `params_string` is parsed as JSON.  Exits with status 1 on any
    validation, connection, or transport error.
    """
    params = ovs.json.from_string(params_string)
    msg = ovs.jsonrpc.Message.create_request(method, params)
    s = msg.is_valid()
    if s:
        sys.stderr.write("not a valid JSON-RPC request: %s\n" % s)
        sys.exit(1)

    error, stream = ovs.stream.Stream.open_block(ovs.stream.Stream.open(name))
    if error:
        sys.stderr.write("could not open \"%s\": %s\n"
                         % (name, os.strerror(error)))
        sys.exit(1)

    rpc = ovs.jsonrpc.Connection(stream)

    error = rpc.send(msg)
    if error:
        sys.stderr.write("could not send request: %s\n" % os.strerror(error))
        sys.exit(1)

    error, msg = rpc.recv_block()
    if error:
        sys.stderr.write("error waiting for reply: %s\n" % os.strerror(error))
        sys.exit(1)

    # BUG FIX: this was a Python 2 `print` statement, a syntax error under
    # Python 3.  The parenthesized call prints identically on both.
    print(ovs.json.to_string(msg.to_json()))

    rpc.close()
def do_notify(name, method, params_string):
    """Send a JSON-RPC notification to REMOTE `name` and return.

    Unlike a request, a notification expects no reply, so the connection is
    closed immediately after a blocking send.  Exits with status 1 on any
    validation, connection, or transport error.
    """
    params = ovs.json.from_string(params_string)
    msg = ovs.jsonrpc.Message.create_notify(method, params)
    validation_error = msg.is_valid()
    if validation_error:
        sys.stderr.write("not a valid JSON-RPC notification: %s\n"
                         % validation_error)
        sys.exit(1)

    error, stream = ovs.stream.Stream.open_block(ovs.stream.Stream.open(name))
    if error:
        sys.stderr.write("could not open \"%s\": %s\n"
                         % (name, os.strerror(error)))
        sys.exit(1)

    rpc = ovs.jsonrpc.Connection(stream)
    error = rpc.send_block(msg)
    if error:
        sys.stderr.write("could not send notification: %s\n"
                         % os.strerror(error))
        sys.exit(1)
    rpc.close()
def main(argv):
    """Parse the command line and dispatch to the selected command.

    Exits with status 1 on an unknown command or a wrong argument count.
    """
    parser = argparse.ArgumentParser(
        description="JSON-RPC test utility for Python.",
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Command table: name -> (handler, arity).  An int arity means
    # "exactly that many arguments"; a 1-tuple means "at least that many".
    commands = {"listen": (do_listen, 1),
                "request": (do_request, 3),
                "notify": (do_notify, 3),
                "help": (parser.print_help, (0,))}

    group_description = """\
listen LOCAL listen for connections on LOCAL
request REMOTE METHOD PARAMS send request, print reply
notify REMOTE METHOD PARAMS send notification and exit
""" + ovs.stream.usage("JSON-RPC")

    group = parser.add_argument_group(title="Commands",
                                      description=group_description)
    group.add_argument('command', metavar="COMMAND", nargs=1,
                       choices=commands, help="Command to use.")
    group.add_argument('command_args', metavar="ARG", nargs='*',
                       help="Arguments to COMMAND.")

    ovs.daemon.add_args(parser)
    args = parser.parse_args()
    ovs.daemon.handle_args(args)

    command_name = args.command[0]
    args = args.command_args

    if command_name not in commands:
        sys.stderr.write("%s: unknown command \"%s\" "
                         "(use --help for help)\n" % (argv[0], command_name))
        sys.exit(1)

    func, n_args = commands[command_name]
    if type(n_args) == tuple:
        if len(args) < n_args[0]:
            # BUG FIX: the minimum count is n_args[0]; interpolating the
            # tuple itself into %d raised a TypeError instead of printing
            # the intended diagnostic.
            sys.stderr.write("%s: \"%s\" requires at least %d arguments but "
                             "only %d provided\n"
                             % (argv[0], command_name, n_args[0], len(args)))
            sys.exit(1)
    elif type(n_args) == int:
        if len(args) != n_args:
            sys.stderr.write("%s: \"%s\" requires %d arguments but %d "
                             "provided\n"
                             % (argv[0], command_name, n_args, len(args)))
            sys.exit(1)
    else:
        assert False

    func(*args)
# Script entry point: forward the raw argv to main().
if __name__ == '__main__':
    main(sys.argv)
|
|
# import_export_open_people/controllers.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .models import OpenPeopleApiCounterManager
from config.base import get_environment_variable
from concurrent.futures import ThreadPoolExecutor, as_completed
from exception.models import handle_exception, handle_record_found_more_than_one_exception
import json
import requests
from requests.structures import CaseInsensitiveDict
from voter.controllers_contacts import assemble_contact_display_name
from voter.models import VoterContactEmail
import wevote_functions.admin
from wevote_functions.functions import convert_state_text_to_state_code, convert_to_int, \
display_city_with_correct_capitalization, display_full_name_with_correct_capitalization, \
generate_date_as_integer, positive_value_exists
from wevote_settings.models import WeVoteSetting, WeVoteSettingsManager
logger = wevote_functions.admin.get_logger(__name__)
OPEN_PEOPLE_USERNAME = get_environment_variable("OPEN_PEOPLE_USERNAME", no_exception=True)
OPEN_PEOPLE_PASSWORD = get_environment_variable("OPEN_PEOPLE_PASSWORD", no_exception=True)
def augment_emails_for_voter_with_open_people(voter_we_vote_id=''):
    """
    Augment one voter's imported contact emails with person data from the
    Open People Search API (first/middle/last name, city, state, zip), then
    copy the collected augmented data onto every matching VoterContactEmail row.

    Flow:
      1. Retrieve all contact emails this voter imported.
      2. Find the contact_email_augmented entries not checked against Open
         People in the last 30 days.
      3. Query Open People in chunks (50 per block, safety-capped at 40 loops),
         saving results and marking not-found emails as checked.
      4. Re-read all augmented entries and update VoterContactEmail rows.

    :param voter_we_vote_id: we_vote_id of the voter whose imported contacts
        should be augmented.
    :return: dict with 'success' (bool) and 'status' (accumulated status text).
    """
    status = ''
    success = True
    email_not_found_list = []
    api_counter_manager = OpenPeopleApiCounterManager()
    # Imported locally — presumably to avoid a circular import at module load
    # time (voter.models imports from other controllers). TODO confirm.
    from voter.models import VoterManager
    voter_manager = VoterManager()
    # Augment all voter contacts with data from Open People
    voter_contact_results = voter_manager.retrieve_voter_contact_email_list(
        imported_by_voter_we_vote_id=voter_we_vote_id)
    if not voter_contact_results['voter_contact_email_list_found']:
        # Nothing imported for this voter — return early with success still True.
        status += "NO_EMAILS_TO_AUGMENT "
        results = {
            'success': success,
            'status': status,
        }
        return results
    email_addresses_returned_list = voter_contact_results['email_addresses_returned_list']
    # Note: We rely on email_outbound/controller.py augment_emails_for_voter_with_we_vote_data having
    #  created a contact_email_augmented entry for every one of these emails previously
    # #########
    # Get list of emails which need to be augmented (updated) with data from Open People
    results = voter_manager.retrieve_contact_email_augmented_list(
        checked_against_open_people_more_than_x_days_ago=30,
        email_address_text_list=email_addresses_returned_list,
        read_only=False,
    )
    contact_email_augmented_list = results['contact_email_augmented_list']
    contact_email_augmented_list_as_dict = results['contact_email_augmented_list_as_dict']
    email_addresses_returned_list = results['email_addresses_returned_list']
    email_addresses_remaining_list = email_addresses_returned_list
    # If we need to make a query, get or generate an updated access token
    open_people_authentication_token = ''
    if len(email_addresses_remaining_list) > 0:
        open_people_authentication_token = fetch_open_people_authentication_token()
    if len(email_addresses_remaining_list) == 0:
        status += "NO_MORE_EMAILS_TO_CHECK_AGAINST_OPEN_PEOPLE "
    elif not positive_value_exists(open_people_authentication_token):
        # NOTE(review): debug print left in place intentionally byte-identical;
        # consider switching to logger in a future behavioral change.
        status += "VALID_OPEN_PEOPLE_AUTHENTICATION_TOKEN_NOT_FOUND "
        print(status)
    else:
        # Now reach out to Open People, with outer limit of 2000, but in blocks of 100 which must complete
        # and be saved before the next block of 100 is started
        failed_api_count = 0
        loop_count = 0
        safety_valve_triggered = False
        number_of_outer_loop_executions_allowed = 40  # 2000 total = 40 loops * 50 number_executed_per_block
        number_executed_per_block = 50
        while len(email_addresses_remaining_list) > 0 and not safety_valve_triggered:
            loop_count += 1
            safety_valve_triggered = loop_count >= number_of_outer_loop_executions_allowed
            # Take the next chunk, then remove it from the remaining list.
            # The set subtraction does not preserve ordering of the remainder.
            email_address_list_chunk = email_addresses_remaining_list[:number_executed_per_block]
            email_addresses_remaining_list = list(set(email_addresses_remaining_list) - set(email_address_list_chunk))
            if len(email_address_list_chunk) == 0:
                break
            open_people_results = query_open_people_email_from_list(
                email_list=email_address_list_chunk,
                authentication_token=open_people_authentication_token)
            number_of_items_sent_in_query = open_people_results['number_of_items_sent_in_query']
            if not open_people_results['success']:
                # Abort the whole run after 3 failed API calls.
                failed_api_count += 1
                if failed_api_count >= 3:
                    safety_valve_triggered = True
                    status += "OPEN_PEOPLE_API_FAILED_3_TIMES "
                    if failed_api_count == 3:
                        print(status)
            elif open_people_results['email_results_found']:
                # A dict of results from Open People, with lowercase email_address_text as the key
                email_results_dict = open_people_results['email_results_dict']
                # Update our cached augmented data
                for contact_email_augmented in contact_email_augmented_list:
                    if contact_email_augmented.email_address_text in email_results_dict:
                        open_people_data = email_results_dict[contact_email_augmented.email_address_text]
                        augmented_email_found = open_people_data['augmented_email_found'] \
                            if 'augmented_email_found' in open_people_data else False
                        if augmented_email_found:
                            city = open_people_data['city'] if 'city' in open_people_data else None
                            first_name = open_people_data['first_name'] if 'first_name' in open_people_data else None
                            last_name = open_people_data['last_name'] if 'last_name' in open_people_data else None
                            middle_name = open_people_data['middle_name'] if 'middle_name' in open_people_data else None
                            state = open_people_data['state'] if 'state' in open_people_data else None
                            if positive_value_exists(state):
                                state_code = convert_state_text_to_state_code(state)
                            else:
                                state_code = None
                            zip_code = open_people_data['zip_code'] if 'zip_code' in open_people_data else None
                            results = voter_manager.update_or_create_contact_email_augmented(
                                checked_against_open_people=True,
                                email_address_text=contact_email_augmented.email_address_text,
                                existing_contact_email_augmented_dict=contact_email_augmented_list_as_dict,
                                open_people_city=city,
                                open_people_first_name=first_name,
                                open_people_last_name=last_name,
                                open_people_middle_name=middle_name,
                                open_people_state_code=state_code,
                                open_people_zip_code=zip_code,
                            )
                            if not results['success']:
                                status += results['status']
                        else:
                            # Queried, but Open People had no data for this email.
                            email_not_found_list.append(contact_email_augmented.email_address_text)
            else:
                # Query succeeded but returned no results for the whole chunk.
                email_not_found_list = list(set(email_not_found_list + email_address_list_chunk))
            # Use Open People API call counter to track the number of queries we are doing each day
            if positive_value_exists(number_of_items_sent_in_query):
                api_counter_manager.create_counter_entry(
                    'EmailAddressSearch',
                    number_of_items_sent_in_query=number_of_items_sent_in_query)
        # Mark as checked all of the email addresses where augmentation wasn't found
        email_not_found_list_unique = list(set(email_not_found_list))
        if len(email_not_found_list_unique) > 0:
            results = voter_manager.update_contact_email_augmented_list_not_found(
                checked_against_open_people=True,
                email_address_text_list=email_not_found_list_unique,
            )
            status += results['status']
    # #########
    # Finally, retrieve all of the augmented data we have collected and update VoterContactEmail entries
    results = voter_manager.retrieve_contact_email_augmented_list(
        email_address_text_list=email_addresses_returned_list,
        read_only=True,
    )
    if results['success'] and results['contact_email_augmented_list_found']:
        contact_email_augmented_list = results['contact_email_augmented_list']
        for contact_email_augmented in contact_email_augmented_list:
            city = contact_email_augmented.open_people_city
            city = display_city_with_correct_capitalization(city)
            first_name = contact_email_augmented.open_people_first_name
            last_name = contact_email_augmented.open_people_last_name
            middle_name = contact_email_augmented.open_people_middle_name
            state_code = contact_email_augmented.open_people_state_code
            zip_code = contact_email_augmented.open_people_zip_code
            contact_name_data_found = positive_value_exists(first_name) or \
                positive_value_exists(last_name) or \
                positive_value_exists(middle_name)
            location_data_found = positive_value_exists(city) or \
                positive_value_exists(state_code) or \
                positive_value_exists(zip_code)
            # Only include fields that actually have data so .update(**defaults)
            # doesn't overwrite existing values with None.
            defaults = {}
            if city is not None:
                defaults['city'] = city
            if first_name is not None:
                defaults['first_name'] = first_name
            if last_name is not None:
                defaults['last_name'] = last_name
            if middle_name is not None:
                defaults['middle_name'] = middle_name
            if state_code is not None:
                defaults['state_code'] = state_code
            if zip_code is not None:
                defaults['zip_code'] = zip_code
            if contact_name_data_found:
                display_name_raw = assemble_contact_display_name(
                    first_name=first_name,
                    middle_name=middle_name,
                    last_name=last_name)
                # if display_name_raw is all caps, correct the capitalization
                if display_name_raw.isupper() or display_name_raw.islower():
                    defaults['display_name'] = display_full_name_with_correct_capitalization(display_name_raw)
                else:
                    defaults['display_name'] = display_name_raw
            # Now update all of the VoterContactEmail entries, regardless of whose contact it is
            if location_data_found or contact_name_data_found:
                try:
                    number_updated = VoterContactEmail.objects.filter(
                        email_address_text__iexact=contact_email_augmented.email_address_text) \
                        .update(**defaults)
                    status += "NUMBER_OF_VOTER_CONTACT_EMAIL_UPDATED: " + str(number_updated) + " "
                except Exception as e:
                    status += "NUMBER_OF_VOTER_CONTACT_EMAIL_NOT_UPDATED: " + str(e) + " "
    results = {
        'success': success,
        'status': status,
    }
    return results
def fetch_open_people_authentication_token():
    """
    Return an Open People Search API bearer token, reusing the cached token
    from WeVoteSetting when it has not expired, otherwise requesting a new one.

    :return: the token string, or '' when no valid token could be obtained.
    """
    we_vote_settings_manager = WeVoteSettingsManager()
    authentication_token = ''
    expire_date_as_integer = we_vote_settings_manager.fetch_setting('open_people_expire_date_as_integer')
    if positive_value_exists(expire_date_as_integer):
        date_now_as_integer = generate_date_as_integer()
        if expire_date_as_integer > date_now_as_integer:
            # Cached token has not expired yet — reuse it.
            authentication_token = we_vote_settings_manager.fetch_setting('open_people_authentication_token')
    if not positive_value_exists(authentication_token):
        response_dict = query_open_people_for_authentication_token()
        authentication_token = response_dict['token'] if 'token' in response_dict else ''
        token_expiry_utc = response_dict['token_expiry_utc'] if 'token_expiry_utc' in response_dict else ''
        # token_expiry_utc is assumed to start with 'YYYY-MM-DD' — TODO confirm
        # against the API; convert that prefix to a YYYYMMDD integer.
        expire_date_string = token_expiry_utc[:10]
        date_as_string = expire_date_string.replace('-', '')
        date_as_integer = convert_to_int(date_as_string)
        # BUG FIX: only cache when the API actually returned a token. The
        # previous version saved an empty token string after a failed request,
        # clobbering any previously cached (possibly still valid) token.
        if positive_value_exists(authentication_token):
            if positive_value_exists(date_as_integer):
                we_vote_settings_manager.save_setting(
                    'open_people_expire_date_as_integer',
                    date_as_integer,
                    value_type=WeVoteSetting.INTEGER)
            we_vote_settings_manager.save_setting(
                'open_people_authentication_token',
                authentication_token,
                value_type=WeVoteSetting.STRING)
    return authentication_token
def query_open_people_email_search(email='', authentication_token=''):
    """
    POST one email address to the Open People Search EmailAddressSearch
    endpoint and return the decoded JSON response.

    :param email: email address to look up.
    :param authentication_token: bearer token from fetch_open_people_authentication_token().
    :return: dict parsed from the API's JSON response.
    """
    headers = CaseInsensitiveDict()
    headers["accept"] = "text/plain"
    headers["Authorization"] = "Bearer " + authentication_token
    headers["Content-Type"] = "application/json"
    # BUG FIX: build the JSON body with json.dumps instead of string
    # concatenation, so quotes/backslashes in the address cannot break
    # (or inject into) the payload.
    data = json.dumps({"emailAddress": email})
    response = requests.post(
        "https://api.openpeoplesearch.com/api/v1/Consumer/EmailAddressSearch",
        headers=headers,
        data=data,
    )
    structured_json = json.loads(response.text)
    return structured_json
def query_open_people_for_authentication_token():
    """
    Authenticate with the Open People Search API using the configured
    username/password and return the decoded JSON response (expected to
    contain 'token' and 'token_expiry_utc' — TODO confirm against the API).

    :return: dict parsed from the API's JSON response.
    """
    headers = CaseInsensitiveDict()
    headers["accept"] = "*/*"
    headers["Content-Type"] = "application/json"
    # BUG FIX: json.dumps instead of string concatenation. The old code raised
    # TypeError when either credential was unset (get_environment_variable with
    # no_exception=True), and special characters in a password corrupted the JSON.
    data = json.dumps({
        "username": OPEN_PEOPLE_USERNAME,
        "password": OPEN_PEOPLE_PASSWORD,
    })
    response = requests.post(
        "https://api.openpeoplesearch.com/api/v1/User/authenticate",
        headers=headers,
        data=data,
    )
    structured_json = json.loads(response.text)
    return structured_json
def query_open_people_phone_search(phone_number='', authentication_token=''):
    """
    POST one phone number to the Open People Search PhoneSearch endpoint and
    return the decoded JSON response.

    :param phone_number: phone number to look up.
    :param authentication_token: bearer token from fetch_open_people_authentication_token().
    :return: dict parsed from the API's JSON response.
    """
    headers = CaseInsensitiveDict()
    headers["accept"] = "text/plain"
    headers["Authorization"] = 'Bearer {authentication_token}'.format(authentication_token=authentication_token)
    headers["Content-Type"] = "application/json"
    # BUG FIX: the original passed a plain dict via data=, which requests
    # form-encodes (application/x-www-form-urlencoded) even though the
    # Content-Type header declares JSON. Serialize the body so it matches.
    response = requests.post(
        'https://api.openpeoplesearch.com/api/v1/Consumer/PhoneSearch',
        headers=headers,
        data=json.dumps({'phoneNumber': phone_number}),
    )
    structured_json = json.loads(response.text)
    return structured_json
def query_open_people_email_from_list(email_list=None, authentication_token=''):
    """
    Query Open People for a batch of email addresses concurrently (thread pool,
    up to 20 workers) and collect the per-email extraction results.

    :param email_list: list of email addresses to query. (Default changed from
        a mutable [] literal to None; an empty or missing list still returns
        success=False with MISSING_EMAIL_LIST, as before.)
    :param authentication_token: bearer token for the API.
    :return: dict with 'success', 'status', 'email_results_found',
        'email_results_dict' (keyed by lowercase email), and
        'number_of_items_sent_in_query'.
    """
    success = True
    status = ""
    email_results_dict = {}
    email_results_found = False
    number_of_items_sent_in_query = 0
    # BUG FIX: email_list=[] was a shared mutable default argument.
    if not email_list:
        status += "MISSING_EMAIL_LIST "
        success = False
        results = {
            'success': success,
            'status': status,
            'email_results_found': email_results_found,
            'email_results_dict': email_results_dict,
            'number_of_items_sent_in_query': number_of_items_sent_in_query,
        }
        return results
    # Fan out one API lookup per email; as_completed lets us harvest results
    # in whatever order they finish.
    threads = []
    with ThreadPoolExecutor(max_workers=20) as executor:
        for email in email_list:
            threads.append(executor.submit(query_and_extract_from_open_people_email_address_search,
                                           email, authentication_token))
            number_of_items_sent_in_query += 1
        for task in as_completed(threads):
            # BUG FIX: reset one_result per task. Previously a failure could
            # read the 'status' of a *different* (earlier) task's result, or
            # hit NameError if the very first task raised.
            one_result = {}
            try:
                one_result = task.result()
                email_address = one_result['email_address_text']
                email_address = email_address.lower()
                email_results_dict[email_address] = one_result
                if one_result['augmented_email_found']:
                    email_results_found = True
            except Exception as e:
                status += one_result['status'] if 'status' in one_result else ''
                status += "CRASHING_ERROR: " + str(e) + ' '
    results = {
        'success': success,
        'status': status,
        'email_results_found': email_results_found,
        'email_results_dict': email_results_dict,
        'number_of_items_sent_in_query': number_of_items_sent_in_query,
    }
    return results
def query_and_extract_from_open_people_email_address_search(email='', authentication_token=''):
    """
    Query Open People for a single email address and reduce the list of
    returned profiles to one name set and one address set — whichever was
    most recently reported ('reportedDate' assumed to start 'YYYY-MM-DD' —
    TODO confirm against the API).

    :param email: email address to look up.
    :param authentication_token: bearer token for the API.
    :return: dict with 'success', 'status', 'augmented_email_found',
        'email_address_text', 'city', 'first_name', 'last_name',
        'middle_name', 'state', and 'zip_code' keys.
    """
    success = True
    status = ""
    address_dict_with_highest_score = {}
    augmented_email_found = False
    json_from_open_people = {}
    name_dict_with_highest_score = {}
    if not positive_value_exists(email):
        status += "MISSING_EMAIL "
        success = False
        # BUG FIX: include the name keys here so the early return has the
        # same shape as the normal return below.
        results = {
            'success': success,
            'status': status,
            'augmented_email_found': augmented_email_found,
            'city': None,
            'email_address_text': email,
            'first_name': None,
            'last_name': None,
            'middle_name': None,
            'state': None,
            'zip_code': None,
        }
        return results
    try:
        json_from_open_people = query_open_people_email_search(email=email, authentication_token=authentication_token)
        if 'errors' in json_from_open_people:
            status += "[" + json_from_open_people['errors'] + "] "
    except Exception as e:
        success = False
        status += 'QUERY_OPEN_PEOPLE_EMAIL_SEARCH_API_FAILED: ' + str(e) + ' '
        handle_exception(e, logger=logger, exception_message=status)
    results_list = json_from_open_people['results'] if 'results' in json_from_open_people else []
    most_recent_address_date = 0
    most_recent_address_dict = {}
    most_recent_name_date = 0
    most_recent_name_dict = {}
    # Scan every returned profile, keeping the most recently reported name set
    # and address set. (Removed: unused names_dict/addresses_dict accumulators,
    # unused index_number counter, unused 'records' count, and a leftover debug
    # print of the raw API response.)
    for open_people_profile in results_list:
        reported_date = open_people_profile['reportedDate'] if 'reportedDate' in open_people_profile else ''
        reported_date_as_string = reported_date[:10]
        reported_date_as_string = reported_date_as_string.replace('-', '')
        reported_date_as_integer = convert_to_int(reported_date_as_string)
        # Possible name (set) - firstName, middleName, lastName
        first_name = open_people_profile['firstName'] if 'firstName' in open_people_profile else None
        middle_name = open_people_profile['middleName'] if 'middleName' in open_people_profile else None
        last_name = open_people_profile['lastName'] if 'lastName' in open_people_profile else None
        if first_name and last_name:
            augmented_email_found = True
            name_dict = {
                'first_name': first_name,
                'middle_name': middle_name,
                'last_name': last_name,
            }
            if reported_date_as_integer > most_recent_name_date:
                most_recent_name_date = reported_date_as_integer
                most_recent_name_dict = name_dict
        # Possible address (set) - city, state, zip
        city = open_people_profile['city'] if 'city' in open_people_profile else None
        state = open_people_profile['state'] if 'state' in open_people_profile else None
        zip_code = open_people_profile['zip'] if 'zip' in open_people_profile else None
        if city or state:
            augmented_email_found = True
            address_dict = {
                'city': city,
                'state': state,
                'zip_code': zip_code,
            }
            if reported_date_as_integer > most_recent_address_date:
                most_recent_address_date = reported_date_as_integer
                most_recent_address_dict = address_dict
    if augmented_email_found:
        # "Highest score" is currently just the most recently reported entry.
        address_dict_with_highest_score = most_recent_address_dict
        name_dict_with_highest_score = most_recent_name_dict
    results = {
        'success': success,
        'status': status,
        'augmented_email_found': augmented_email_found,
        'email_address_text': email,
        'city': address_dict_with_highest_score['city'] if 'city' in address_dict_with_highest_score else None,
        'first_name':
            name_dict_with_highest_score['first_name'] if 'first_name' in name_dict_with_highest_score else None,
        'last_name':
            name_dict_with_highest_score['last_name'] if 'last_name' in name_dict_with_highest_score else None,
        'middle_name':
            name_dict_with_highest_score['middle_name'] if 'middle_name' in name_dict_with_highest_score else None,
        'state': address_dict_with_highest_score['state'] if 'state' in address_dict_with_highest_score else None,
        'zip_code':
            address_dict_with_highest_score['zip_code'] if 'zip_code' in address_dict_with_highest_score else None,
    }
    return results
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.