# Copyright 2016 NOKIA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# run me using:
# python -m testtools.run nuage_neutron/tests/unit/test_mech_nuage.py
import mock
import oslo_config
import testtools
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from neutron.conf import common as core_config
from neutron.conf.plugins.ml2 import config as ml2_config
from nuage_neutron.plugins.common.base_plugin import RootNuagePlugin
from nuage_neutron.plugins.common import config
from nuage_neutron.plugins.common.exceptions import NuageBadRequest
from nuage_neutron.plugins.common import nuagedb
from nuage_neutron.plugins.nuage_ml2.mech_nuage import NuageMechanismDriver
from nuage_neutron.vsdclient.impl.vsdclientimpl import VsdClientImpl
from nuage_neutron.vsdclient.restproxy import RESTProxyError
from nuage_neutron.vsdclient.restproxy import RESTProxyServer
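# NOTE: the subnet precommit tests further down pass a Context(network,
# subnet) object into create_subnet_precommit(). That helper class is
# defined later in the original test module and is not part of this
# excerpt. A minimal, illustrative stand-in (assuming the driver only
# reads .current, .original, .network.current and a db session) could
# look roughly like the sketch below; the real helper may differ.
class Context(object):
    def __init__(self, network, subnet):
        self.current = subnet               # subnet dict under test
        self.original = subnet
        self.network = mock.MagicMock()
        self.network.current = network      # owning network dict
        self.db_context = mock.MagicMock()  # stand-in for a neutron db session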
class ConfigTypes(object):
MINIMAL_CONFIG = 1
MISSING_SERVICE_PLUGIN = 2
MISSING_ML2_EXTENSION = 3
NUAGE_PAT_WITH_NUAGE_UNDERLAY_CONFIG = 4
NUAGE_L2BRIDGE_WITHOUT_NUAGE_NETWORK = 5
class TestNuageMechanismDriver(testtools.TestCase):
@classmethod
def setUpClass(cls):
super(TestNuageMechanismDriver, cls).setUpClass()
# make sure we have the configs
if core_config.core_opts is None or ml2_config.ml2_opts is None:
cls.fail('Fix your setup.')
# disable the auth key renewal in VsdClient
VsdClientImpl.set_auth_key_renewal(False)
def set_config_fixture(self, config_type=ConfigTypes.MINIMAL_CONFIG):
ml2_config.register_ml2_plugin_opts()
conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
conf.config(group='RESTPROXY', server='localhost:9876')
conf.config(group='RESTPROXY', server_timeout=1)
conf.config(group='RESTPROXY', server_max_retries=1)
conf.config(group='RESTPROXY', cms_id='1')
conf.config(group='PLUGIN', enable_debug='api_stats')
if config_type == ConfigTypes.MISSING_SERVICE_PLUGIN:
conf.config(service_plugins=['NuagePortAttributes',
'NuageL3'])
else:
conf.config(service_plugins=['NuagePortAttributes',
'NuageL3', 'NuageAPI'])
if config_type == ConfigTypes.MISSING_ML2_EXTENSION:
conf.config(group='ml2',
extension_drivers=['nuage_subnet',
'nuage_port'])
else:
conf.config(group='ml2',
extension_drivers=['nuage_subnet',
'nuage_port',
'port_security'])
if config_type == ConfigTypes.NUAGE_L2BRIDGE_WITHOUT_NUAGE_NETWORK:
conf.config(service_plugins=['NuagePortAttributes',
'NuageL3', 'NuageAPI',
'NuageL2Bridge'])
return conf
# get me a Nuage mechanism driver
def get_me_a_nmd(self):
self.set_config_fixture()
nmd = NuageMechanismDriver()
nmd._l2_plugin = nmd
nmd.initialize()
return nmd
@staticmethod
def get_me_a_rest_proxy():
vsd_client = RESTProxyServer(server='localhost:9876',
base_uri='/nuage/api/v6',
serverssl=True,
verify_cert='False',
serverauth='1:1',
auth_resource='/me',
organization='org')
return vsd_client
# NETWORK DRIVER INITIALIZATION CHECKS
def test_init_native_nmd_missing_service_plugin(self):
self.set_config_fixture(ConfigTypes.MISSING_SERVICE_PLUGIN)
self.assertRaisesRegex(
oslo_config.cfg.ConfigFileValueError,
r'Missing required service_plugin\(s\) '
r'\[\'NuageAPI\'\] for mechanism driver nuage',
NuageMechanismDriver().initialize)
def test_init_native_nmd_missing_ml2_extension(self):
self.set_config_fixture(ConfigTypes.MISSING_ML2_EXTENSION)
self.assertRaisesRegex(
oslo_config.cfg.ConfigFileValueError,
r'Missing required extension\(s\) '
r'\[\'port_security\'\] for mechanism driver nuage',
NuageMechanismDriver().initialize)
def test_init_missing_nuage_network_ml2_extension_for_l2bridge(self):
self.set_config_fixture(
ConfigTypes.NUAGE_L2BRIDGE_WITHOUT_NUAGE_NETWORK)
self.assertRaisesRegex(
oslo_config.cfg.ConfigFileValueError,
'Missing required extension '
r'\'nuage_network\' for service plugin NuageL2Bridge',
NuageMechanismDriver().initialize)
def test_init_native_nmd_invalid_server(self):
self.set_config_fixture()
self.assertRaisesRegex(
RESTProxyError,
'Error in REST call to VSD: '
'Could not establish a connection with the VSD. '
'Please check VSD URI path in plugin config '
'and verify IP connectivity.',
NuageMechanismDriver().initialize)
@mock.patch.object(RESTProxyServer, 'raise_rest_error')
@mock.patch.object(VsdClientImpl, 'verify_cms')
def test_multi_init_nmd_invalid_server(self, *_):
# init nmd 3 times
nmd1 = self.get_me_a_nmd()
nmd2 = self.get_me_a_nmd()
nmd3 = self.get_me_a_nmd()
# validate there is actually only 1 vsdclient (memoize)
self.assertEqual(nmd2.vsdclient, nmd1.vsdclient)
self.assertEqual(nmd3.vsdclient, nmd1.vsdclient)
# validate no api call is made - we don't count authentication calls!
self.assertEqual(0, nmd1.vsdclient.restproxy.api_count)
# FLAT NETWORKS
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'is_external',
return_value=False)
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids',
return_value=[])
def test_create_subnet_precommit_in_flat_network(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'flat',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 4,
'cidr': '10.0.0.0/24'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids',
return_value=[])
def test_create_subnet_precommit_in_flat_net_with_nuagenet(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'flat',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'nuagenet': '0x100',
'ip_version': 4,
'cidr': '10.0.0.0/24'}
try:
nmd.create_subnet_precommit(Context(network, subnet))
self.fail('Subnet precommit should not have succeeded')
except NuageBadRequest as e:
self.assertEqual('Bad request: Network should have \'provider:'
'network_type\' vxlan or have such a segment',
str(e))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids',
return_value=[])
def test_create_vsd_mgd_subnet_precommit_in_flat_net(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'flat',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'nuagenet': '0x100',
'net_partition': 'lalaland',
'ip_version': 4,
'cidr': '10.0.0.0/24'}
try:
nmd.create_subnet_precommit(Context(network, subnet))
self.fail('Create subnet precommit should not have succeeded')
except NuageBadRequest as e:
self.assertEqual('Bad request: Network should have \'provider:'
'network_type\' vxlan or have such a segment',
str(e))
# VXLAN NETWORKS
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[])
@mock.patch.object(NuageMechanismDriver, '_create_vsd_managed_subnet')
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids',
return_value=[])
def test_create_subnet_precommit_with_nuagenet(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'nuagenet': '0x100',
'ip_version': 4,
'cidr': '10.0.0.0/24'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[])
@mock.patch.object(NuageMechanismDriver, '_create_vsd_managed_subnet')
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids',
return_value=[])
def test_create_vsd_mgd_subnet_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'nuagenet': '0x100',
'net_partition': 'lalaland',
'ip_version': 4,
'gateway_ip': None,
'cidr': '10.0.0.0/24'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets')
@mock.patch.object(NuageMechanismDriver, '_create_vsd_managed_subnet')
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids')
@mock.patch.object(nuagedb, 'get_net_partition_by_id',
return_value={'id': 1})
def test_create_vsd_mgd_v6_subnet_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'nuagenet': '0x100',
'net_partition': 'lalaland',
'ip_version': 6,
'cidr': 'fee::/64'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'default_np_id',
return_value=1)
@mock.patch.object(NuageMechanismDriver, 'is_external',
return_value=False)
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[])
@mock.patch.object(NuageMechanismDriver,
'_create_openstack_managed_subnet')
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids',
return_value=[])
@mock.patch.object(nuagedb, 'get_net_partition_by_id',
return_value={'id': 1})
@mock.patch.object(NuageMechanismDriver, '_create_nuage_subnet')
def test_create_subnet_precommit_default(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 4,
'cidr': '10.10.1.0/24',
'gateway_ip': '10.10.1.1'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[])
@mock.patch.object(NuageMechanismDriver, 'is_external', return_value=False)
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(NuageMechanismDriver,
'_create_openstack_managed_subnet')
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids')
def test_create_v6_subnet_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 6,
'cidr': 'fee::/64'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[{'id': 'subnet1', 'ip_version': 6},
{'id': 'subnet2', 'ip_version': 6}])
@mock.patch.object(NuageMechanismDriver, 'is_external', return_value=False)
@mock.patch.object(NuageMechanismDriver, 'check_dhcp_agent_alive',
return_value=False)
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(NuageMechanismDriver,
'_create_openstack_managed_subnet')
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids')
def test_create_two_v6_subnets_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 6,
'cidr': 'fef::/64'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[{'id': 'subnet1', 'ip_version': 6},
{'id': 'subnet2', 'ip_version': 6}])
@mock.patch.object(NuageMechanismDriver, 'is_external', return_value=False)
@mock.patch.object(NuageMechanismDriver, 'check_dhcp_agent_alive',
return_value=True)
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids')
def test_create_two_v6_subnets_with_dhcp_agent_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 6,
'cidr': 'eef::/64'}
try:
nmd.create_subnet_precommit(Context(network, subnet))
self.fail('This is a negative test and was not meant to pass.')
except NuageBadRequest as e:
self.assertEqual('Bad request: A network with multiple ipv4 or '
'ipv6 subnets is not allowed when '
'neutron-dhcp-agent is enabled', str(e))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[{'id': 'subnet1', 'ip_version': 4}])
@mock.patch.object(NuageMechanismDriver, 'is_external', return_value=False)
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(NuageMechanismDriver,
'_create_openstack_managed_subnet')
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids')
def test_create_v4_v6_subnet_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 6,
'cidr': 'fee::/64'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[{'id': 'subnet1', 'ip_version': 4},
{'id': 'subnet2', 'ip_version': 4},
{'id': 'subnet2', 'ip_version': 6}])
@mock.patch.object(NuageMechanismDriver, 'is_external', return_value=False)
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids')
def test_create_two_v4_v6_subnets_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 6,
'cidr': 'fee::/64'}
try:
nmd.create_subnet_precommit(Context(network, subnet))
self.fail('This is a negative test and was not meant to pass.')
except NuageBadRequest as e:
self.assertEqual('Bad request: A network can only have maximum 1 '
'ipv4 and 1 ipv6 subnet existing together', str(e)
)
@mock.patch.object(RESTProxyServer, 'generate_nuage_auth')
@mock.patch.object(RESTProxyServer, '_rest_call',
return_value=(401, 'Unauthorized', None, None, None,
None))
def test_rest_call_infinite_recursion(self, *_):
rest_proxy = self.get_me_a_rest_proxy()
try:
rest_proxy.rest_call('get', '', '')
except Exception as e:
self.assertIn('Unauthorized', str(e),
"Got an exception other than Unauthorized")
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[{'id': 'subnet1', 'ip_version': 4},
{'id': 'subnet2', 'ip_version': 6},
{'id': 'subnet2', 'ip_version': 4}])
@mock.patch.object(NuageMechanismDriver, 'is_external', return_value=False)
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids')
def test_create_v4_v6_v4_subnets_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 6,
'cidr': 'fee::/64'}
try:
nmd.create_subnet_precommit(Context(network, subnet))
self.fail('This is a negative test and was not meant to pass.')
except NuageBadRequest as e:
self.assertEqual('Bad request: A network can only have maximum 1 '
'ipv4 and 1 ipv6 subnet existing together', str(e)
)
# DEFAULT ALLOW NON IP CHECKS
def test_default_allow_non_ip_not_set(self):
self.assertFalse(config.default_allow_non_ip())
def test_default_allow_non_ip_set_empty_string(self):
try:
cfg = self.set_config_fixture()
cfg.config(group='PLUGIN', default_allow_non_ip='')
self.fail('From Ocata onwards oslo is correctly checking its '
'config value parsing; '
'hence this line should not be reached.')
except ValueError as e:
self.assertEqual('Unexpected boolean value \'\'', str(e))
def test_default_allow_non_ip_set(self):
cfg = self.set_config_fixture()
cfg.config(group='PLUGIN', default_allow_non_ip=True)
self.assertTrue(config.default_allow_non_ip())
# ip utility checks
def test_ip_comparison(self):
self.assertTrue(NuageMechanismDriver.compare_ip(
'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b', 'fdf8:f53e:61e4::18'))
self.assertFalse(NuageMechanismDriver.compare_cidr(
'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b', 'fdf8:f53e:61e4::18'))
def test_cidr_comparison(self):
self.assertTrue(NuageMechanismDriver.compare_cidr(
'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64', 'fdf8:f53e:61e4::18/64'))
self.assertFalse(NuageMechanismDriver.compare_cidr(
'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64', 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/63'))
def test_needs_vport_creation_basic(self):
self.assertFalse(NuageMechanismDriver.needs_vport_creation(
'nuage:vip'))
def test_needs_vport_creation_using_prefix(self):
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
conf.config(group='PLUGIN', device_owner_prefix='no_vport')
query_args = {'dbname': dbname}
if args['psql_host'] is not None: query_args['host'] = args['psql_host']
if args['psql_password'] is not None: query_args['password'] = args['psql_password']
con = psycopg2.connect(**query_args)
cur = con.cursor()
# Query on ventilation data
cur.execute('SET search_path to ' + schema_name)
query = """
select i.subject_id, i.hadm_id, v.icustay_id, v.ventnum, v.starttime, v.endtime
FROM icustay_detail i
INNER JOIN ventdurations v ON i.icustay_id = v.icustay_id
where v.icustay_id in ({icuids})
and v.starttime between intime and outtime
and v.endtime between intime and outtime;
""".format(icuids=','.join(icuids_to_keep))
vent_data = pd.read_sql_query(query, con)
vent_data = continuous_outcome_processing(vent_data, data, icustay_timediff)
vent_data = vent_data.apply(add_outcome_indicators)
vent_data.rename(columns = {'on':'vent'}, inplace=True)
vent_data = vent_data.reset_index()
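# For reference: add_outcome_indicators is defined earlier in this script
# (outside this excerpt). Per icustay group it is assumed to expand the
# (starttime, endtime) windows into an hourly on/off indicator over the
# whole stay, roughly like:
#
#     def add_outcome_indicators(out_gb):
#         subject_id = out_gb['subject_id'].unique()[0]
#         hadm_id = out_gb['hadm_id'].unique()[0]
#         max_hrs = out_gb['max_hours'].unique()[0]
#         on_hrs = set()
#         for _, row in out_gb.iterrows():
#             on_hrs.update(range(row['starttime'], row['endtime'] + 1))
#         off_hrs = set(range(max_hrs + 1)) - on_hrs
#         hours = list(off_hrs) + list(on_hrs)
#         on_vals = [0] * len(off_hrs) + [1] * len(on_hrs)
#         return pd.DataFrame({'subject_id': subject_id, 'hadm_id': hadm_id,
#                              'hours_in': hours, 'on': on_vals})
#
# (Illustrative sketch only; the actual implementation may differ.)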
# Get the patients without the intervention in there too, so that we
# can fill in all-zero rows for them below
ids_with = vent_data['icustay_id']
ids_with = set(map(int, ids_with))
ids_all = set(map(int, icuids_to_keep))
ids_without = (ids_all - ids_with)
#ids_without = map(int, ids_without)
# Create a new fake dataframe with blanks on all vent entries
out_data = data.copy(deep=True)
out_data = out_data.reset_index()
out_data = out_data.set_index('icustay_id')
out_data = out_data.iloc[out_data.index.isin(ids_without)]
out_data = out_data.reset_index()
out_data = out_data[['subject_id', 'hadm_id', 'icustay_id']]
out_data['max_hours'] = out_data['icustay_id'].map(icustay_timediff)
# Create all 0 column for vent
out_data = out_data.groupby('icustay_id')
out_data = out_data.apply(add_blank_indicators)
out_data.rename(columns = {'on':'vent'}, inplace=True)
out_data = out_data.reset_index()
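# Similarly, add_blank_indicators (also defined outside this excerpt) is
# assumed to emit an all-zero hourly indicator for stays that never had
# the intervention, roughly:
#
#     def add_blank_indicators(out_gb):
#         subject_id = out_gb['subject_id'].unique()[0]
#         hadm_id = out_gb['hadm_id'].unique()[0]
#         max_hrs = out_gb['max_hours'].unique()[0]
#         hrs = range(max_hrs + 1)
#         return pd.DataFrame({'subject_id': subject_id, 'hadm_id': hadm_id,
#                              'hours_in': list(hrs), 'on': [0] * len(hrs)})
#
# (Illustrative sketch only.)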
# Concatenate all the data vertically
Y = pd.concat([vent_data[['subject_id', 'hadm_id', 'icustay_id', 'hours_in', 'vent']],
out_data[['subject_id', 'hadm_id', 'icustay_id', 'hours_in', 'vent']]],
axis=0)
# Start merging all other interventions
table_names = ['vasopressordurations', 'adenosinedurations', 'dobutaminedurations', 'dopaminedurations', 'epinephrinedurations', 'isupreldurations',
'milrinonedurations', 'norepinephrinedurations', 'phenylephrinedurations', 'vasopressindurations']
column_names = ['vaso', 'adenosine', 'dobutamine', 'dopamine', 'epinephrine', 'isuprel',
'milrinone', 'norepinephrine', 'phenylephrine', 'vasopressin']
# TODO(mmd): This section doesn't work. What is its purpose?
for t, c in zip(table_names, column_names):
# TOTAL VASOPRESSOR DATA
cur.execute('SET search_path to ' + schema_name)
query = """
select i.subject_id, i.hadm_id, v.icustay_id, v.vasonum, v.starttime, v.endtime
FROM icustay_detail i
INNER JOIN {table} v ON i.icustay_id = v.icustay_id
where v.icustay_id in ({icuids})
and v.starttime between intime and outtime
and v.endtime between intime and outtime;
""".format(icuids=','.join(icuids_to_keep), table=t)
new_data = pd.read_sql_query(query,con)
new_data = continuous_outcome_processing(new_data, data, icustay_timediff)
new_data = new_data.apply(add_outcome_indicators)
new_data.rename(columns = {'on':c}, inplace=True)
new_data = new_data.reset_index()
# c may not be in Y if we are only extracting a subset of the population, in which c was never
# performed.
if c not in new_data:
print("Column", c, "not in data.")
continue
Y = Y.merge(
new_data[['subject_id', 'hadm_id', 'icustay_id', 'hours_in', c]],
on=['subject_id', 'hadm_id', 'icustay_id', 'hours_in'],
how='left'
)
# Sort the values
Y.fillna(0, inplace=True)
Y[c] = Y[c].astype(int)
#Y = Y.sort_values(['subject_id', 'icustay_id', 'hours_in']) #.merge(df3,on='name')
Y = Y.reset_index(drop=True)
print('Extracted ' + c + ' from ' + t)
tasks=["colloid_bolus", "crystalloid_bolus", "nivdurations"]
for task in tasks:
cur.execute('SET search_path to ' + schema_name)
if task=='nivdurations':
query = """
select i.subject_id, i.hadm_id, v.icustay_id, v.starttime, v.endtime
FROM icustay_detail i
INNER JOIN {table} v ON i.icustay_id = v.icustay_id
where v.icustay_id in ({icuids})
and v.starttime between intime and outtime
and v.endtime between intime and outtime;
""".format(icuids=','.join(icuids_to_keep), table=task)
else:
query = """
select i.subject_id, i.hadm_id, v.icustay_id, v.charttime AS starttime,
v.charttime AS endtime
FROM icustay_detail i
INNER JOIN {table} v ON i.icustay_id = v.icustay_id
where v.icustay_id in ({icuids})
and v.charttime between intime and outtime
""".format(icuids=','.join(icuids_to_keep), table=task)
new_data = pd.read_sql_query(query, con=con)
if new_data.shape[0] == 0:
continue
new_data = continuous_outcome_processing(new_data, data, icustay_timediff)
new_data = new_data.apply(add_outcome_indicators)
new_data.rename(columns = {'on':task}, inplace=True)
new_data = new_data.reset_index()
new_data.to_csv('new_task.csv')
Y = Y.merge(
new_data[['subject_id', 'hadm_id', 'icustay_id', 'hours_in', task]],
on=['subject_id', 'hadm_id', 'icustay_id', 'hours_in'],
how='left'
)
# Sort the values
Y.fillna(0, inplace=True)
Y[task] = Y[task].astype(int)
Y = Y.reset_index(drop=True)
print('Extracted ' + task)
# TODO: ADD THE RBC/PLT/PLASMA DATA
# TODO: ADD DIALYSIS DATA
# TODO: ADD INFECTION DATA
cur.close()
con.close()
Y = Y.filter(items=['subject_id', 'hadm_id', 'icustay_id', 'hours_in', 'vent'] + column_names + tasks)
Y.subject_id = Y.subject_id.astype(int)
Y.icustay_id = Y.icustay_id.astype(int)
Y.hours_in = Y.hours_in.astype(int)
Y.vent = Y.vent.astype(int)
Y.vaso = Y.vaso.astype(int)
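# ID_COLS is defined near the top of the original script (not shown here);
# it is assumed to be ['subject_id', 'hadm_id', 'icustay_id'].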
y_id_cols = ID_COLS + ['hours_in']
Y = Y.sort_values(y_id_cols)
Y.set_index(y_id_cols, inplace=True)
print('Shape of Y : ', Y.shape)
# SAVE AS NUMPY ARRAYS AND TEXT FILES
#np_Y = Y.as_matrix()
#np.save(os.path.join(outPath, outcome_filename), np_Y)
# Turn back into columns
df = Y.reset_index()
df = sanitize_df(df, outcome_schema)
csv_fpath = os.path.join(outPath, outcome_filename)
save_sanitized_df_to_csv(csv_fpath, df, outcome_schema)
col_names = list(df.columns.values)
col_names = col_names[3:]
with open(os.path.join(outPath, outcome_columns_filename), 'w') as f:
f.write('\n'.join(col_names))
# TODO(mmd): Why does df have the index? Is sanitize making multiindex?
# SAVE THE DATA AS A PANDAS OBJECT
# TODO(<NAME>): Why writing out Y after you've separately sanitized df?
Y.to_hdf(os.path.join(outPath, outcome_hd5_filename), 'Y')
return df
# Apply the variable limits to remove things
# TODO(mmd): controlled printing.
def apply_variable_limits(df, var_ranges, var_names_index_col='LEVEL2'):
idx_vals = df.index.get_level_values(var_names_index_col)
non_null_idx = ~df.value.isnull()
var_names = set(idx_vals)
var_range_names = set(var_ranges.index.values)
for var_name in var_names:
var_name_lower = var_name.lower()
if var_name_lower not in var_range_names:
print("No known ranges for %s" % var_name)
continue
outlier_low_val, outlier_high_val, valid_low_val, valid_high_val = [
var_ranges.loc[var_name_lower, x] for x in ('OUTLIER_LOW','OUTLIER_HIGH','VALID_LOW','VALID_HIGH')
]
running_idx = non_null_idx & (idx_vals == var_name)
outlier_low_idx = (df.value < outlier_low_val)
outlier_high_idx = (df.value > outlier_high_val)
valid_low_idx = ~outlier_low_idx & (df.value < valid_low_val)
valid_high_idx = ~outlier_high_idx & (df.value > valid_high_val)
var_outlier_idx = running_idx & (outlier_low_idx | outlier_high_idx)
var_valid_low_idx = running_idx & valid_low_idx
var_valid_high_idx = running_idx & valid_high_idx
df.loc[var_outlier_idx, 'value'] = np.nan
df.loc[var_valid_low_idx, 'value'] = valid_low_val
df.loc[var_valid_high_idx, 'value'] = valid_high_val
n_outlier = sum(var_outlier_idx)
n_valid_low = sum(var_valid_low_idx)
n_valid_high = sum(var_valid_high_idx)
if n_outlier + n_valid_low + n_valid_high > 0:
print(
"%s had %d / %d rows cleaned:\n"
" %d rows were strict outliers, set to np.nan\n"
" %d rows were low valid outliers, set to %.2f\n"
" %d rows were high valid outliers, set to %.2f\n"
"" % (
var_name,
n_outlier + n_valid_low + n_valid_high, sum(running_idx),
n_outlier, n_valid_low, valid_low_val, n_valid_high, valid_high_val
)
)
return df
def plot_variable_histograms(col_names, df):
# Plot some of the data, just to make sure it looks ok
for c, vals in df.iteritems():
n = vals.dropna().count()
if n < 2: continue
# get median, variance, skewness
med = vals.dropna().median()
var = vals.dropna().var()
skew = vals.dropna().skew()
# plot
fig = plt.figure(figsize=(13, 6))
plt.subplots(figsize=(13,6))
vals.dropna().plot.hist(bins=100, label='HIST (n={})'.format(n))
# fake plots for KS test, median, etc
plt.plot([], label=' ',color='lightgray')
plt.plot([], label='Median: {}'.format(format(med,'.2f')),
color='lightgray')
plt.plot([], label='Variance: {}'.format(format(var,'.2f')),
color='lightgray')
plt.plot([], label='Skew: {}'.format(format(skew,'.2f')),
color='lightgray')
# add title, labels etc.
plt.title('{} measurements in ICU '.format(str(c)))
plt.xlabel(str(c))
plt.legend(loc="upper left", bbox_to_anchor=(1,1),fontsize=12)
plt.xlim(0, vals.quantile(0.99))
fig.savefig(os.path.join(outPath, (str(c) + '_HIST_.png')), bbox_inches='tight')
# Main, where you can call what makes sense.
if __name__ == '__main__':
# Construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('--out_path', type=str, default= '/scratch/{}/phys_acuity_modelling/data'.format(os.environ['USER']),
help='Enter the path you want the output')
ap.add_argument('--resource_path',
type=str,
default=os.path.expandvars("$MIMIC_EXTRACT_CODE_DIR/resources/"))
ap.add_argument('--extract_pop', type=int, default=1,
help='Whether or not to extract population data: 0 - no extraction, ' +
'1 - extract if not present in the data directory, 2 - extract even if there is data')
ap.add_argument('--extract_numerics', type=int, default=1,
help='Whether or not to extract numerics data: 0 - no extraction, ' +
'1 - extract if not present in the data directory, 2 - extract even if there is data')
ap.add_argument('--extract_outcomes', type=int, default=1,
help='Whether or not to extract outcome data: 0 - no extraction, ' +
'1 - extract if not present in the data directory, 2 - extract even if there is data')
ap.add_argument('--extract_codes', type=int, default=1,
help='Whether or not to extract ICD9 codes: 0 - no extraction, ' +
'1 - extract if not present in the data directory, 2 - extract even if there is data')
ap.add_argument('--pop_size', type=int, default=0,
help='Size of population to extract')
ap.add_argument('--exit_after_loading', type=int, default=0)
ap.add_argument('--var_limits', type=int, default=1,
help='Whether to create a version of the data with variable limits included. ' +
'1 - apply variable limits, 0 - do not apply variable limits')
ap.add_argument('--plot_hist', type=int, default=1,
help='Whether to plot the histograms of the data')
ap.add_argument('--psql_host', type=str, default=None,
help='Postgres host. Try "/var/run/postgresql/" for Unix domain socket errors.')
ap.add_argument('--psql_password', type=str, default=None, help='Postgres password.')
ap.add_argument('--group_by_level2', action='store_false', dest='group_by_level2', default=True,
help='Pass this flag to disable grouping by LEVEL2 (grouping is on by default).')
ap.add_argument('--min_percent', type=float, default=0.0,
help='Minimum percentage of rows that must be non-missing observations for each numeric column. ' +
'min_percent = 1 means columns with more than 99 percent NaN will be removed')
ap.add_argument('--min_age', type=int, default=15,
help='Minimum age of patients to be included')
ap.add_argument('--min_duration', type=int, default=12,
help='Minimum hours of stay to be included')
ap.add_argument('--max_duration', type=int, default=240,
help='Maximum hours of stay to be included')
__main__ module to make an exe file.
if not self.__writingModule('__main__'):
message = "Can't generate an executable without a __main__ module."
raise Exception(message)
if self.platform.startswith('win'):
modext = '.pyd'
else:
modext = '.so'
# First gather up the strings and code for all the module names, and
# put those in a string pool.
pool = b""
strings = set()
for moduleName, mdef in self.getModuleDefs():
strings.add(moduleName.encode('ascii'))
for value in fields.values():
if value is not None:
strings.add(value.encode('utf-8'))
# Sort by length descending, allowing reuse of partial strings.
strings = sorted(strings, key=lambda str:-len(str))
string_offsets = {}
# Now add the strings to the pool, and collect the offsets relative to
# the beginning of the pool.
for string in strings:
# First check whether it's already in there; it could be part of
# a longer string.
offset = pool.find(string + b'\0')
if offset < 0:
offset = len(pool)
pool += string + b'\0'
string_offsets[string] = offset
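# Example of the reuse enabled by the descending-length sort: if the pool
# already contains b'panda3d.core\0', a later lookup for b'core\0' finds it
# as a suffix of that entry, so no new bytes are added and the offset simply
# points into the longer string. (Illustrative module names only.)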
# Now go through the modules and add them to the pool as well. These
# are not 0-terminated, but we later record their sizes and names in
# a table after the blob header.
moduleList = []
for moduleName, mdef in self.getModuleDefs():
origName = mdef.moduleName
if mdef.forbid:
# Explicitly disallow importing this module.
moduleList.append((moduleName, 0, 0))
continue
# For whatever it's worth, align the code blocks.
if len(pool) & 3 != 0:
pad = (4 - (len(pool) & 3))
pool += b'\0' * pad
assert not mdef.exclude
# Allow importing this module.
module = self.mf.modules.get(origName, None)
code = getattr(module, "__code__", None)
if code:
code = marshal.dumps(code)
size = len(code)
if getattr(module, "__path__", None):
# Indicate package by negative size
size = -size
moduleList.append((moduleName, len(pool), size))
pool += code
continue
# This is a module with no associated Python code. It is either
# an extension module or a builtin module. Get the filename, if
# it is the former.
extensionFilename = getattr(module, '__file__', None)
if extensionFilename:
self.extras.append((moduleName, extensionFilename))
# If it is a submodule of a frozen module, Python will have
# trouble importing it as a builtin module. Synthesize a frozen
# module that loads it dynamically.
if '.' in moduleName:
if self.platform.startswith("macosx") and not use_console:
# We write the Frameworks directory to sys.path[0].
code = 'import sys;del sys.modules["%s"];import sys,os,imp;imp.load_dynamic("%s",os.path.join(sys.path[0], "%s%s"))' % (moduleName, moduleName, moduleName, modext)
else:
code = 'import sys;del sys.modules["%s"];import sys,os,imp;imp.load_dynamic("%s",os.path.join(os.path.dirname(sys.executable), "%s%s"))' % (moduleName, moduleName, moduleName, modext)
if sys.version_info >= (3, 2):
code = compile(code, moduleName, 'exec', optimize=2)
else:
code = compile(code, moduleName, 'exec')
code = marshal.dumps(code)
moduleList.append((moduleName, len(pool), len(code)))
pool += code
# Determine the format of the header and module list entries depending
# on the platform.
num_pointers = 12
stub_data = bytearray(stub_file.read())
bitnesses = self._get_executable_bitnesses(stub_data)
header_layouts = {
32: '<QQHHHH8x%dII' % num_pointers,
64: '<QQHHHH8x%dQQ' % num_pointers,
}
entry_layouts = {
32: '<IIi',
64: '<QQixxxx',
}
# Calculate the size of the module tables, so that we can determine
# the proper offset for the string pointers. There can be more than
# one module table for macOS executables. Sort the bitnesses so that
# the alignment is correct.
bitnesses = sorted(bitnesses, reverse=True)
pool_offset = 0
for bitness in bitnesses:
pool_offset += (len(moduleList) + 1) * struct.calcsize(entry_layouts[bitness])
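# Worked example (illustrative numbers): struct.calcsize('<QQixxxx') == 24
# and struct.calcsize('<IIi') == 12, so with 10 modules a 64-bit-only
# binary reserves (10 + 1) * 24 = 264 bytes of table space (the +1 being
# the null terminator entry), while a fat 64+32-bit macOS binary reserves
# 11 * 24 + 11 * 12 = 396 bytes before the string/code pool begins.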
# Now we can determine the offset of the blob.
if self.platform.startswith('win'):
# We don't use mmap on Windows. Align just for good measure.
blob_align = 32
else:
# Align to page size, so that it can be mmapped.
blob_align = 4096
# Add padding before the blob if necessary.
blob_offset = len(stub_data)
if (blob_offset & (blob_align - 1)) != 0:
pad = (blob_align - (blob_offset & (blob_align - 1)))
stub_data += (b'\0' * pad)
blob_offset += pad
assert (blob_offset % blob_align) == 0
assert blob_offset == len(stub_data)
# Also determine the total blob size now. Add padding to the end.
blob_size = pool_offset + len(pool)
if blob_size & 31 != 0:
pad = (32 - (blob_size & 31))
blob_size += pad
# Calculate the offsets for the variables. These are pointers,
# relative to the beginning of the blob.
field_offsets = {}
for key, value in fields.items():
if value is not None:
encoded = value.encode('utf-8')
field_offsets[key] = pool_offset + string_offsets[encoded]
# OK, now go and write the blob. This consists of the module table
# (there may be two in the case of a macOS universal (fat) binary).
blob = b""
append_offset = False
for bitness in bitnesses:
entry_layout = entry_layouts[bitness]
header_layout = header_layouts[bitness]
table_offset = len(blob)
for moduleName, offset, size in moduleList:
encoded = moduleName.encode('ascii')
string_offset = pool_offset + string_offsets[encoded]
if size != 0:
offset += pool_offset
blob += struct.pack(entry_layout, string_offset, offset, size)
# A null entry marks the end of the module table.
blob += struct.pack(entry_layout, 0, 0, 0)
flags = 0
if log_append:
flags |= 1
# Compose the header we will be writing to the stub, to tell it
# where to find the module data blob, as well as other variables.
header = struct.pack(header_layout,
blob_offset,
blob_size,
1, # Version number
num_pointers, # Number of pointers that follow
0, # Codepage, not yet used
flags,
table_offset, # Module table pointer.
# The following variables need to be set before static init
# time. See configPageManager.cxx, where they are read.
field_offsets.get('prc_data', 0),
field_offsets.get('default_prc_dir', 0),
field_offsets.get('prc_dir_envvars', 0),
field_offsets.get('prc_path_envvars', 0),
field_offsets.get('prc_patterns', 0),
field_offsets.get('prc_encrypted_patterns', 0),
field_offsets.get('prc_encryption_key', 0),
field_offsets.get('prc_executable_patterns', 0),
field_offsets.get('prc_executable_args_envvar', 0),
field_offsets.get('main_dir', 0),
field_offsets.get('log_filename', 0),
0)
# Now, find the location of the 'blobinfo' symbol in the binary,
# to which we will write our header.
if not self._replace_symbol(stub_data, b'blobinfo', header, bitness=bitness):
# This must be a legacy deploy-stub, which requires the offset to
# be appended to the end.
append_offset = True
# Add the string/code pool.
assert len(blob) == pool_offset
blob += pool
del pool
# Now pad out the blob to the calculated blob size.
if len(blob) < blob_size:
blob += b'\0' * (blob_size - len(blob))
assert len(blob) == blob_size
if append_offset:
# This is for legacy deploy-stub.
print("WARNING: Could not find blob header. Is deploy-stub outdated?")
blob += struct.pack('<Q', blob_offset)
with open(target, 'wb') as f:
f.write(stub_data)
assert f.tell() == blob_offset
f.write(blob)
os.chmod(target, 0o755)
return target
def _get_executable_bitnesses(self, data):
"""Returns the bitnesses (32 or 64) of the given executable data.
This will contain 1 element for non-fat executables."""
if data.startswith(b'MZ'):
# A Windows PE file.
offset, = struct.unpack_from('<I', data, 0x3c)
assert data[offset:offset+4] == b'PE\0\0'
magic, = struct.unpack_from('<H', data, offset + 24)
assert magic in (0x010b, 0x020b)
if magic == 0x020b:
return (64,)
else:
return (32,)
elif data.startswith(b"\177ELF"):
# A Linux/FreeBSD ELF executable.
elfclass = ord(data[4:5])
assert elfclass in (1, 2)
return (elfclass * 32,)
elif data[:4] in (b'\xFE\xED\xFA\xCE', b'\xCE\xFA\xED\xFE'):
# 32-bit Mach-O file, as used on macOS.
return (32,)
elif data[:4] in (b'\xFE\xED\xFA\xCF', b'\xCF\xFA\xED\xFE'):
# 64-bit Mach-O file, as used on macOS.
return (64,)
elif data[:4] in (b'\xCA\xFE\xBA\xBE', b'\xBE\xBA\xFE\xCA'):
# Universal binary with 32-bit offsets.
num_fat, = struct.unpack_from('>I', data, 4)
bitnesses = set()
ptr = 8
for i in range(num_fat):
cputype, cpusubtype, offset, size, align = \
struct.unpack_from('>IIIII', data, ptr)
ptr += 20
if (cputype & 0x1000000) != 0:
bitnesses.add(64)
else:
bitnesses.add(32)
return tuple(bitnesses)
elif data[:4] in (b'\xCA\xFE\xBA\xBF', b'\xBF\xBA\xFE\xCA'):
# Universal binary with 64-bit offsets.
num_fat, = struct.unpack_from('>I', data, 4)
bitnesses = set()
ptr = 8
for i in range(num_fat):
cputype, cpusubtype, offset, size, align = \
struct.unpack_from('>QQQQQ', data, ptr)
ptr += 40
if (cputype & 0x1000000) != 0:
bitnesses.add(64)
else:
bitnesses.add(32)
return tuple(bitnesses)
def _replace_symbol(self, data, symbol_name, replacement, bitness=None):
"""We store a custom section in the binary file containing a header
containing offsets to the binary data.
If bitness is set, and the binary in question is a macOS universal
binary, it only replaces for binaries with the given bitness. """
if data.startswith(b'MZ'):
# A Windows PE file.
pe = pefile.PEFile()
pe.read(io.BytesIO(data))
addr = pe.get_export_address(symbol_name)
if addr is not None:
X_markers, X_grouping = self._split_groups(np.array(X))
preds = model.predict((X_markers, X_grouping))
return preds
def fit(
self,
params: Dict[str, Any],
X: "npt.ArrayLike",
y: "npt.ArrayLike",
individuals: Optional["npt.ArrayLike"] = None,
grouping: Optional["npt.ArrayLike"] = None,
sample_weights: Optional["npt.ArrayLike"] = None,
**kwargs
):
model = self.model(params)
X_markers, X_grouping = self._split_groups(np.array(X))
model.fit(
(X_markers, X_grouping),
y,
sample_weights=sample_weights,
individuals=grouping
)
return model
def _split_groups(
self,
X: "npt.ArrayLike"
) -> "Tuple[np.ndarray, Optional[np.ndarray]]":
if len(self.grouping_columns) == 0:
return np.array(X), None
grouping_columns = slice(0, len(self.grouping_columns))
marker_columns = slice(len(self.grouping_columns), None)
X_ = np.array(X)
X_grouping = X_[:, grouping_columns]
X_markers = X_[:, marker_columns]
assert X_.shape[1] == (X_grouping.shape[1] + X_markers.shape[1])
return X_markers, X_grouping
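# Layout assumption illustrated: the grouping columns (if any) come first
# in X, followed by the marker columns. E.g. with 2 grouping columns and
# 3 markers, a row [g0, g1, m0, m1, m2] splits into X_grouping = [g0, g1]
# and X_markers = [m0, m1, m2].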
def _sample_transformed_target_params(
self,
trial: "optuna.Trial",
options: List[str] = [
"passthrough",
"stdnorm",
"quantile"
],
) -> Dict[str, BaseTypes]:
params = {}
target = trial.suggest_categorical(
"target_transformer",
options
)
params["target_transformer"] = target
if target == "quantile":
params["target_transformer_quantile_distribution"] = (
trial.suggest_categorical(
"target_transformer_quantile_distribution",
["uniform", "normal"]
)
)
return params
def _sample_transformed_target_model(self, params: Dict[str, Any]):
from sklearn.preprocessing import StandardScaler, QuantileTransformer
preprocessor = params["target_transformer"]
if preprocessor == "stdnorm":
g = StandardScaler()
elif preprocessor == "quantile":
d = params["target_transformer_quantile_distribution"]
g = QuantileTransformer(
output_distribution=d,
n_quantiles=min([1000, round(params["nsamples"] / 2)])
)
else:
assert preprocessor == "passthrough"
g = None # Unity function
if g is None:
return None
else:
return g
def _sample_marker_preprocessing_params(
self,
trial: "optuna.Trial",
options: List[str] = [
"drop",
"passthrough",
"maf",
"onehot",
],
) -> Dict[str, BaseTypes]:
params = {}
preprocessor = trial.suggest_categorical(
"marker_preprocessor",
options
)
params["marker_preprocessor"] = preprocessor
return params
def _sample_marker_preprocessing_model(self, params: Dict[str, Any]):
from sklearn.preprocessing import OneHotEncoder
from selectml.sk.preprocessor import (
MAFScaler
)
preprocessor = params["marker_preprocessor"]
if preprocessor == "drop":
g = "drop"
elif preprocessor == "passthrough":
g = "passthrough"
elif preprocessor == "onehot":
g = OneHotEncoder(
categories="auto",
drop=None,
handle_unknown="ignore"
)
elif preprocessor == "maf":
g = MAFScaler(ploidy=self.ploidy)
return g
def _sample_feature_selection_params(
self,
trial: "optuna.Trial",
options: List[str] = ["drop", "passthrough", "rf", "relief"]
) -> Dict[str, BaseTypes]:
params = {}
selector = trial.suggest_categorical("feature_selector", options)
params["feature_selector"] = selector
nmarkers = len(self.marker_columns)
if selector == "rf":
params["feature_selection_rf_min_impurity_decrease"] = (
trial.suggest_float(
"feature_selection_rf_min_impurity_decrease",
0,
10
)
)
params["feature_selection_nfeatures"] = (
trial.suggest_int(
"feature_selection_nfeatures",
min([100, round(nmarkers / 2)]),
nmarkers - 1,
)
)
elif selector == "relief":
params["feature_selection_nfeatures"] = (
trial.suggest_int(
"feature_selection_nfeatures",
min([100, round(nmarkers / 2)]),
nmarkers - 1,
)
)
elif selector == "maf":
params["feature_selection_maf_threshold"] = (
trial.suggest_uniform(
"feature_selection_maf_threshold",
0.01,
0.49,
)
)
return params
def _sample_feature_selection_model(self, params: Dict[str, Any]):
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestRegressor
from ..feature_selection import (MAFSelector, MultiSURF)
selector = params["feature_selector"]
wrap_sfm = False
if selector == "drop":
s = "drop"
elif selector == "passthrough":
s = "passthrough"
elif selector == "rf":
wrap_sfm = True
s = RandomForestRegressor(
criterion="mae",
max_depth=4,
n_estimators=1000,
max_features=0.1,
min_samples_split=5,
min_samples_leaf=1,
min_impurity_decrease=(
params["feature_selection_rf_min_impurity_decrease"]
),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=self.seed,
)
elif selector == "relief":
s = MultiSURF(
n=params["feature_selection_nfeatures"],
nepoch=10,
sd=1,
random_state=self.seed
)
elif selector == "maf":
s = MAFSelector(
threshold=params["feature_selection_maf_threshold"],
ploidy=self.ploidy
)
if wrap_sfm:
nfeatures = params["feature_selection_nfeatures"]
s = SelectFromModel(
estimator=s,
prefit=False,
max_features=nfeatures
)
return s
def sample_preprocessing_params(
self,
trial: "optuna.Trial",
target_options: List[str] = [
"passthrough",
"stdnorm",
"quantile",
],
marker_options: List[str] = [
"maf",
"passthrough",
"onehot",
],
feature_selection_options: List[str] = [
"drop",
"passthrough",
"rf",
],
) -> Dict[str, BaseTypes]:
params = {}
params.update(
self._sample_transformed_target_params(trial, target_options)
)
params.update(self._sample_marker_preprocessing_params(
trial,
marker_options
))
params.update(self._sample_feature_selection_params(
trial,
feature_selection_options
))
return params
def sample_preprocessing_model(self, params: Dict[str, Any]):
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from ..feature_selection import MAFSelector
target = self._sample_transformed_target_model(params)
marker = self._sample_marker_preprocessing_model(params)
feature_selection = self._sample_feature_selection_model(params)
def use_all_columns(X):
return np.repeat(True, X.shape[1])
marker_trans = Pipeline([
(
"maf_filter",
MAFSelector(
threshold=params["min_maf"],
ploidy=self.ploidy
)
),
("marker_scaler", marker),
("feature_selection", feature_selection),
])
grouping_trans = StandardScaler()
return target, grouping_trans, marker_trans
class BGLRBaseModel(OptimiseModel):
def predict(self, model, X):
X_markers, X_grouping = self._split_groups(np.array(X))
preds = model.predict((X_markers, X_grouping))
return preds
def fit(
self,
params: Dict[str, Any],
X: "npt.ArrayLike",
y: "npt.ArrayLike",
individuals: Optional["npt.ArrayLike"] = None,
grouping: Optional["npt.ArrayLike"] = None,
sample_weights: Optional["npt.ArrayLike"] = None,
**kwargs
):
model = self.model(params)
X_markers, X_grouping = self._split_groups(np.array(X))
model.fit(
(X_markers, X_grouping),
y,
sample_weights=sample_weights,
individuals=grouping
)
return model
def _split_groups(
self,
X: "npt.ArrayLike"
) -> "Tuple[np.ndarray, Optional[np.ndarray]]":
if len(self.grouping_columns) == 0:
return np.array(X), None
grouping_columns = slice(0, len(self.grouping_columns))
marker_columns = slice(len(self.grouping_columns), None)
X_ = np.array(X)
X_grouping = X_[:, grouping_columns]
X_markers = X_[:, marker_columns]
assert X_.shape[1] == (X_grouping.shape[1] + X_markers.shape[1])
return X_markers, X_grouping
def _sample_transformed_target_params(
self,
trial: "optuna.Trial",
options: List[str] = [
"passthrough",
"stdnorm",
"quantile"
],
) -> Dict[str, BaseTypes]:
params = {}
target = trial.suggest_categorical(
"target_transformer",
options
)
params["target_transformer"] = target
if target == "quantile":
params["target_transformer_quantile_distribution"] = (
trial.suggest_categorical(
"target_transformer_quantile_distribution",
["uniform", "normal"]
)
)
return params
def _sample_transformed_target_model(self, params: Dict[str, Any]):
from sklearn.preprocessing import StandardScaler, QuantileTransformer
preprocessor = params["target_transformer"]
if preprocessor == "stdnorm":
g = StandardScaler()
elif preprocessor == "quantile":
d = params["target_transformer_quantile_distribution"]
g = QuantileTransformer(
output_distribution=d,
n_quantiles=min([1000, round(params["nsamples"] / 2)])
)
else:
assert preprocessor == "passthrough"
g = None # Unity function
return g
def _sample_feature_selection_params(
self,
trial: "optuna.Trial",
options: List[str] = ["drop", "passthrough", "rf", "relief"]
) -> Dict[str, BaseTypes]:
params = {}
selector = trial.suggest_categorical("feature_selector", options)
params["feature_selector"] = selector
nmarkers = len(self.marker_columns)
if selector == "rf":
params["feature_selection_rf_min_impurity_decrease"] = (
trial.suggest_float(
"feature_selection_rf_min_impurity_decrease",
0,
10
)
)
params["feature_selection_nfeatures"] = (
trial.suggest_int(
"feature_selection_nfeatures",
min([100, round(nmarkers / 2)]),
nmarkers - 1,
)
)
elif selector == "relief":
params["feature_selection_nfeatures"] = (
trial.suggest_int(
"feature_selection_nfeatures",
min([100, round(nmarkers / 2)]),
nmarkers - 1,
)
)
elif selector == "maf":
params["feature_selection_maf_threshold"] = (
trial.suggest_uniform(
"feature_selection_maf_threshold",
0.0001,
0.2,
)
)
return params
def _sample_feature_selection_model(self, params: Dict[str, Any]):
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestRegressor
from ..feature_selection import (MAFSelector, MultiSURF)
selector = params["feature_selector"]
wrap_sfm = False
if selector == "drop":
s = "drop"
elif selector == "passthrough":
s = "passthrough"
elif selector == "rf":
wrap_sfm = True
s = RandomForestRegressor(
criterion="mae",
max_depth=4,
n_estimators=1000,
max_features=0.1,
min_samples_split=5,
min_samples_leaf=1,
min_impurity_decrease=(
params["feature_selection_rf_min_impurity_decrease"]
),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=self.seed,
)
elif selector == "relief":
s = MultiSURF(
n=params["feature_selection_nfeatures"],
nepoch=10,
sd=1,
random_state=self.seed
)
elif selector == "maf":
s = MAFSelector(
threshold=params["feature_selection_maf_threshold"],
ploidy=self.ploidy
)
if wrap_sfm:
nfeatures = params["feature_selection_nfeatures"]
s = SelectFromModel(
estimator=s,
prefit=False,
max_features=nfeatures
)
return s
def _sample_marker_preprocessing_params(
self,
trial: "optuna.Trial",
options: List[str] = [
"maf",
"noia_add"
],
) -> Dict[str, BaseTypes]:
params = {}
preprocessor = trial.suggest_categorical(
"marker_preprocessor",
options
)
params["marker_preprocessor"] = preprocessor
params["noia_dom"] = trial.suggest_categorical(
"noia_dom",
[True, False]
)
params["noia_epi"] = trial.suggest_categorical(
"noia_epi",
[True, False]
)
return params
def _sample_marker_preprocessing_model(self, params: Dict[str, Any]):
from selectml.sk.preprocessor import (
MAFScaler
)
from selectml.sk.distance import (
NOIAAdditiveKernel,
NOIADominanceKernel,
HadamardCovariance,
)
preprocessor = params["marker_preprocessor"]
if preprocessor == "maf":
g = MAFScaler(ploidy=self.ploidy)
elif preprocessor == "noia_add":
g = NOIAAdditiveKernel()
else:
raise ValueError("This shouldn't happen")
if params["noia_dom"]:
d: Optional[NOIADominanceKernel] = NOIADominanceKernel()
else:
d = None
if params["noia_epi"]:
if isinstance(g, NOIAAdditiveKernel):
a = g
train_a = False
else:
a = NOIAAdditiveKernel()
train_a = True
e: Optional[HadamardCovariance] = HadamardCovariance(
a, a,
fit_a=train_a,
fit_b=False
)
else:
e = None
return g, d, e
def _sample_grouping_preprocessing_params(
self,
trial: "optuna.Trial",
grouping_options: List[str] = ["drop", "passthrough",
"onehot", "pca"]
) -> Dict[str, BaseTypes]:
params = {}
if len(self.grouping_columns) > 0:
preprocessor = trial.suggest_categorical(
"grouping_preprocessor",
grouping_options
)
else:
preprocessor = trial.suggest_categorical(
"grouping_preprocessor",
["drop"]
)
params["grouping_preprocessor"] = preprocessor
if preprocessor in ("factor", "pca"):
# 1->1 10->3 100->10
ncomponents = floor(sqrt(len(self.grouping_columns)))
params["grouping_ncomponents"] = trial.suggest_categorical(
"grouping_ncomponents",
[ncomponents]
)
return params
def _sample_grouping_preprocessing_model(self, params: Dict[str, Any]):
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.decomposition import FactorAnalysis, TruncatedSVD
preprocessor = params["grouping_preprocessor"]
if preprocessor == "drop":
g = "drop"
elif preprocessor == "passthrough":
g = "passthrough"
elif preprocessor == "onehot":
g = OneHotEncoder(
categories="auto",
drop="if_binary",
handle_unknown="error",
sparse=False,
)
elif preprocessor == "factor":
g = Pipeline([
(
"ohe",
OneHotEncoder(
categories="auto",
drop=None,
handle_unknown="ignore",
sparse=False
)
),
(
"factor",
FactorAnalysis(n_components=params["grouping_ncomponents"])
)
])
elif preprocessor == "pca":
g = Pipeline([
(
"ohe",
OneHotEncoder(
categories="auto",
drop=None,
handle_unknown="ignore"
)
),
(
"pca",
TruncatedSVD(n_components=params["grouping_ncomponents"])
)
])
return g
def sample_preprocessing_params(
self,
trial: "optuna.Trial",
target_options: List[str] = [
"passthrough",
"stdnorm",
"quantile",
],
marker_options: List[str] = [
"maf",
"noia_add",
],
feature_selection_options: List[str] = [
"drop",
"passthrough",
"rf",
"relief",
],
grouping_options: List[str] = ["passthrough", "onehot"],
) -> Dict[str, BaseTypes]:
params = {}
params.update(
self._sample_transformed_target_params(trial, target_options)
)
params.update(self._sample_marker_preprocessing_params(
trial,
marker_options
))
params.update(self._sample_feature_selection_params(
trial,
feature_selection_options
))
params.update(self._sample_grouping_preprocessing_params(
trial,
grouping_options
))
summary.strip().split(".")[-1].replace(" ", "") + ";"
else:
summary = summary.strip().replace(".prototype", "")
if "Array." in summary and "(" in summary:
funcall = summary.strip().split("Array")[1].split("(")[0].replace(" ", "").strip()
funarg = summary.strip().split("(")[1]
callablefunction = "var output = new Array(randominput)" + funcall + "(" + funarg + ";"
elif "Array" in summary and "(" in summary:
callablefunction = "var output = new " + summary.strip().replace("\d.*", "").replace(".","") + ";"
callablefunction = re.sub(" \d+", " ", callablefunction)
elif "Runtime Semantics: " in summary:
callablefunction = callablefunction.split(".")[0] + "." + summary.split("Runtime Semantics: ")[-1].replace(" ", "") + ";"
elif "Set" in summary:
callablefunction = "var output = randominput." + summary.strip().split(".")[-1].replace(" ", "") + ";"
return callablefunction
# method to: (1) extract the assignment statement inside the body of the relevant section
# and store the variable and corresponding value (or function call) inside the dictionary
# (2) extract the conditional statements and simplify them by substituting the variables (if any)
# with their values
def extractAssignmentAndConditionals(self, header, body, methodsignature):
sectionid = header.split()[0]
self.variable_dataset.clear()
numvars = 0
for statement in body.split("\n"):
if len(statement) > 100:
continue
statement = statement.replace("\xa0", " ")
isassignment = re.search(assignmentpattern, statement.strip())
if isassignment:
postags = self.nlp.pos_tag(statement)
match = False
for i in range(len(postags)):
if "NN" in postags[i][1]:
match = True
break
if not match:
continue
var, value = self.getAssignment(statement)
var = " " + var + " "
if "." in value and ".length" not in value:
value = value.split(".")[0]
if "the number of elements in" in value:
value = value.replace("the number of elements in", "").strip() + ".length"
self.variable_dataset[var,sectionid] = value
numvars += 1
isexception = re.search(exceptionpattern, statement)
if isexception:
postags = self.nlp.pos_tag(statement)
match = False
for i in range(len(postags)):
if "NN" in postags[i][1]:
match = True
break
if match and "exception." in statement:
errstmt = self.substituteVars(statement.split("exception.")[0], sectionid)
tmpvars = numvars
while(errstmt != self.substituteVars(errstmt, sectionid) and tmpvars>0):
errstmt = self.substituteVars(errstmt, sectionid)
tmpvars -= 1;
if header not in self.template_content:
self.template_content[header] = [methodsignature]
self.template_content[header].append(errstmt)
else:
self.template_content[header].append(errstmt)
isinputoutput1 = re.search(relevantstmtpattern1, statement)
isinputoutput2 = re.search(relevantstmtpattern2, statement)
if isinputoutput1 or isinputoutput2:
postags = self.nlp.pos_tag(statement)
match = False
for i in range(len(postags)):
if "NN" in postags[i][1]:
match = True
break
if not match:
continue
updatedstatement = self.substituteVars(statement, sectionid)
tmpvars = numvars
while(updatedstatement != self.substituteVars(updatedstatement, sectionid) and tmpvars > 0):
updatedstatement = self.substituteVars(updatedstatement, sectionid)
tmpvars -= 1
if header not in self.template_content:
self.template_content[header] = [methodsignature]
self.template_content[header].append(updatedstatement)
else:
self.template_content[header].append(updatedstatement)
# method to process specific symbols and phrases in the
# extracted natural language conditional statements in to
# valid JavaScript symbols and variables
def convertTextToCode(self, text):
text = ' '.join(text.split())
text = text.replace("this value.", "randominput")
text = text.replace("this value", "randominput")
text = text.replace("result of ", "")
text = text.replace("if", "")
text = text.replace(" the ", "")
text = text.replace("Type (", "typeof (")
text = text.replace(" Object ", " \"object\" ")
text = text.replace(" Number ", " \"number\" ")
text = text.replace("empty String", "\"\"")
text = text.replace("empty string", "\"\"")
text = text.replace("String \"NaN\"", "\"NaN\"")
text = text.replace("+∞", "+Infinity")
text = text.replace("-∞", "-Infinity")
text = text.replace("∞", "Infinity")
text = text.replace("‑", "-")
text = text.replace("+0", "0")
text = text.replace("‑0", "-0")
text = text.replace("≥", ">= ")
text = text.replace("> =", ">= ")
text = text.replace("< =", "<= ")
text = text.replace("≤", "<= ")
text = text.replace("! ", "")
text = text.replace("× ", "*")
text = text.replace(" ≠ ", "!= ")
text = text.replace("! =", "!= ")
text = text.replace("! ", "")
text = text.replace("min (", "Math.min (")
text = text.replace("max (", "Math.max (")
text = text.replace("abs (", "Math.abs (")
if "already an integer" in text:
var = text.split("===")[0].split("(")[1]
text = text.replace("already an integer", "parseInt(" + var +", 10)")
if "number of elements in" in text:
text = text.split("number of elements in")[0] + text.split("number of elements in")[1].replace(") ) )", ") ).length )")
if text[-1]==".":
text = text[:-1]
if "===" in text and ("NaN" in text or "-0" in text or "+ 0" in text):
if "&&" in text:
newtext = "("
clauses = text.split("&&")
for idx, clause in enumerate(clauses):
if " === " in clause:
lhs = clause.split("===")[0].strip()
rhs = clause.split("===")[1].strip()
if "NaN" in rhs or "-0" in rhs or "+ 0" in rhs:
if idx == 0:
clause = "Object.is" + lhs + "," + rhs + ")"
else:
clause = "Object.is(" + lhs + "," + rhs + ")"
if idx < len(clauses)-1:
newtext += clause + " && "
else:
newtext += clause
text = newtext
elif "||" in text:
newtext = "("
clauses = text.split("||")
for idx, clause in enumerate(clauses):
if " === " in clause:
lhs = clause.split("===")[0].strip()
rhs = clause.split("===")[1].strip()
if "NaN" in rhs or "-0" in rhs or "+ 0" in rhs:
if idx == 0:
clause = "Object.is" + lhs + "," + rhs + ")"
else:
clause = "Object.is(" + lhs + "," + rhs + ")"
if idx < len(clauses)-1:
newtext += clause + " && "
else:
newtext += clause
text = newtext
else:
lhs = text.split("===")[0].strip()
rhs = text.split("===")[1].strip()
text = "Object.is" + lhs + "," + rhs
return text
    # method to generate a compilable test template function using
    # the method call and natural language conditional statements
    # identified using the above-defined methods for a given relevant section
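    # Rough shape of an emitted template for the "node" compiler (sketch only):
    #   function test_<name>(randominput, ...){
    #       if (<condition derived from the spec>){
    #           <method call assigning to output>
    #           assert.strictEqual(<expected>, output); console.log("Good Test"); return;
    #       }
    #       ...
    #       console.log("OK Test")
    #   }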
def generateCompilableTemplate(self, header):
testtemplate = self.template_content[header]
methodname = " ".join(header.split()[1:]).split("(")[0].strip()
templatecount = 0
templates = []
testname = methodname.replace(".","_").replace("-","_").replace(" ", "_").lower()
vardecl = testtemplate[0]
if "..." in vardecl:
vardecl = vardecl.replace("...", "")
if "[" in vardecl:
vardecl = vardecl.replace("[", "")
vardecl = vardecl.replace("]", "")
if "randominput" in vardecl and "output" not in vardecl:
args = "randominput "
else:
args = ""
if "." in vardecl:
arg1 = re.search(argpattern,vardecl.split(".")[0])
arg2 = re.search(argpattern,vardecl.split(".")[1])
if arg1 and len(arg1.group()) >2:
if len(args) > 0:
args = args + "," + arg1.group().replace("(", "").replace(")", "")
else:
args = args + arg1.group().replace("(", "").replace(")", "")
if arg1 and arg2 and len(arg2.group()) >2:
args = args + "," + arg2.group().replace("(", "").replace(")", "").replace("[", "").replace("]", "")
elif arg2 and len(arg2.group()) >2:
if len(args) > 0:
args = args + "," + arg2.group().replace("(", "").replace(")", "").replace("[", "").replace("]", "")
else:
args = args + arg2.group().replace("(", "").replace(")", "").replace("[", "").replace("]", "")
else:
if len(args) > 0:
args = args + "," + vardecl.split("(")[1].split(")")[0]
else:
args = args + vardecl.split("(")[1].split(")")[0]
if "arguments" in args:
args = args.replace("arguments", "args")
if "randominput" not in args and "randominput" in vardecl:
if args=="":
args = "randominput"
else:
args = "randominput," + args
testfunction = "function test_" + testname + "("+ args + "){"
for i in range(1, len(testtemplate)):
templatecount += 1
if "if " not in testtemplate[i]:
continue
            # comment out the following two lines to generate templates for more sections;
            # this was made possible by modifying existing patterns and adding more patterns
if "weak" in testfunction or "set_prototype" in testfunction or "regexp_prototype" in testfunction or "get_sharedarraybuffer" in testfunction or "get_map" in testfunction or "number_prototype_tofixed" in testfunction or "sharedarraybuffer" in testfunction or "array_prototype_concat" in testfunction or "array_prototype_push" in testfunction or "array_prototype_sort" in testfunction or "array_prototype_splice" in testfunction or "atomics_wait" in testfunction or "test_number_prototype_tostring" in testfunction or "test_string_raw" in testfunction:
continue
test = ""
testcondition = testtemplate[i]
if "return" in testcondition:
expectedinput = testcondition.split("return")[0].strip().split("if")[1].strip()
expectedinput = self.convertTextToCode(expectedinput)
expectedoutput = self.convertTextToCode(testcondition.split("return")[1].strip())
if self.compiler == "rhino":
test = "if (" + expectedinput + "){\n\t\t" + vardecl + "\n\t\t" + "new TestCase(\"" + testname + "\", \"" + testname + "\", " + expectedoutput + ", output);\n\t\ttest();\n\t\treturn;\n\t\t}"
elif self.compiler == "node":
if "NaN" == expectedoutput.strip():
test = "if (" + expectedinput + "){\n\t\t" + vardecl + "\n\t\t" + "assert.strictEqual(isNaN(output), true);\n\t\tconsole.log(\"Good Test\");\n\t\treturn;\n\t\t}"
else:
test = "if (" + expectedinput + "){\n\t\t" + vardecl + "\n\t\t" + "assert.strictEqual(" + expectedoutput + ", output);\n\t\tconsole.log(\"Good Test\");\n\t\treturn;\n\t\t}"
if test.count("(")!=test.count(")") or "performing" in test or "implementation" in test or "@@" in test or "«" in test or "[" in test or "either " in test or "finite " in test or "atomics_wait" in test or "concatenation" in test or "filler" in test or "searchLength" in test or "-searchStr" in test or " not " in test or "unit value of" in test:
continue
testfunction = testfunction + "\n\t" + test
if "throw" in testcondition:
expectedinput = testcondition.split("throw")[0].split("if")[1].strip()
expectedinput = self.convertTextToCode(expectedinput)
expectedoutput = self.convertTextToCode(testcondition.split("throw")[1].strip())
if self.compiler == "rhino":
test = "if (" + expectedinput + "){\n\t\t try{\n\t\t\t" + vardecl + "\n\t\t\t return;" + "\n\t\t}catch(e){\n\t\t\t" + "new TestCase(\"" + testname + "\", \"" + testname + "\", true, eval(e instanceof " + expectedoutput + "));\n\t\t\ttest();\n\t\t\treturn;\n\t\t}\n\t}"
elif self.compiler == "node":
test = "if (" + expectedinput + "){\n\t\t try{\n\t\t\t" + vardecl + "\n\t\t\tconsole.log(\"Bad Test/Failed Test\");\n\t\t\t return;" + "\n\t\t}catch(e){\n\t\t\t" + "assert.strictEqual(true, eval(e instanceof " + expectedoutput + "));\n\t\t\tconsole.log(\"Good Test\");\n\t\t\treturn;\n\t\t}\n\t}"
if test.count("(")!=test.count(")") or "performing" in test or "implementation" in test or "@@" in test or "«" in test or "[" in test or "either " in test or "finite " in test or "atomics_wait" in test or "concatenation" in test or "filler" in test or "searchLength" in test or "-searchStr" in test or " not " in test or "unit value of" in test:
continue
testfunction = testfunction + "\n\t" + test
if self.compiler == "node":
testfunction = testfunction + "\n\t\tconsole.log(\"OK Test\")\n}"
elif self.compiler == "rhino":
testfunction = testfunction + "\n}"
templates.append(testfunction)
template = ''.join(templates)
if len(testtemplate) > 1 and "if" in template and "unknown" not in template.split("){")[0] and "NewTarget" | |
os.path.join(os.path.dirname(path), newlabel)
# try:
# dobjs = TakeSnapshots([path,])
# os.rename(path, newpath)
# editOk = True
# if dobjs:
# self.RefreshView(dobjs)
# except OSError:
# editOk = False # TODO: notify user of error
return editOk
def DoShowMenu(self, item):
"""Show context menu"""
logger.debug('DoShowMenu')
# Check if click was in blank window area
nodes = self.GetSelections()
activeNode = None
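        # Each entry below is [menu id, label, icon file name or None, submenu item list
        # or None] (some entries carry a trailing None); an empty list renders as a
        # menu separator.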
items = [
[ID_NEW, 'New', None , None],
[],
[wx.NewIdRef(), 'Copy', "copy_edit.png", None],
[wx.NewIdRef(), 'Paste', "paste_edit.png", None],
[ID_DELETE_PROJECT, 'Delete', "delete_obj.png", None],
[wx.NewIdRef(), 'Move', None, None],
[ID_RENAME, 'Rename', None, None],
[],
[ID_IMPORT, 'Import', "import_prj.png", None],
[ID_EXPORT, 'Export', "export.png", None],
[],
[ID_REFRESH_TREE_ITEM, 'Refresh', "refresh.png", None],
[ID_CLOSE_PROJECT, 'Close Project', None, None],
            [wx.NewIdRef(), 'Close Unrelated Projects', None, None],
# [wx.NewIdRef(), 'Run As', "run_exc.png", None, None],
[wx.NewIdRef(), 'Debug As', "debug_exc.png", None, None],
[],
[ID_PROJECT_PROPERTIES, 'Properties', "project_properties.png", None, None],
]
if len(nodes) == 1:
path = [ self.GetPyData(node) for node in nodes ][0]
logger.debug(f'DoShowMenu:{path}')
extension = path.split('.')[-1]
if os.path.isfile(path):
items.insert(0, [wx.NewIdRef(), 'Open', None, None],)
fileList = []
runList = []
if extension == 'py':
fileList.append([wx.NewIdRef(), 'Python', 'python_module.png'])
runList.append([ID_PYTHON_RUN, 'Python Run', 'python_run.png'])
items.insert(1, [wx.NewIdRef(), 'Open With', None, fileList ],)
items.insert(2, [],)
if extension == 'java':
fileList.append([wx.NewIdRef(), 'Python', 'python_module.png'])
                    runList.append([wx.NewIdRef(), 'Java Application', 'java_launch.png'])
items.insert(1, [wx.NewIdRef(), 'Open With', None, fileList ],)
items.insert(2, [],)
items.insert(15, [wx.NewIdRef(), 'Run As', "run_exc.png", runList, None],)
else:
pass
try:
activeNode = self.GetPyData(item)
selectedPerspectiveName = None
try:
selectedPerspectiveName = self.GetTopLevelParent().selectedPerspectiveName
except Exception as e:
logger.error(e)
# setting default perspective name as python
selectedPerspectiveName = 'python'
if not self.menu:
popupMenu = wx.Menu()
#
for mi_tup in items:
if len(mi_tup) > 0:
if mi_tup[0] == ID_NEW:
sm = wx.Menu()
for menuItemName in menuItemList[selectedPerspectiveName]:
if len(menuItemName) > 1:
menuItem = wx.MenuItem(sm, menuItemName[0], menuItemName[1])
if menuItemName[2]:
menuItem.SetBitmap(self.fileOperations.getImageBitmap(imageName=menuItemName[2]))
sm.Append(menuItem)
self.Bind(wx.EVT_MENU, lambda e:self.onRightClickMenu(e, file=path), id=menuItemName[0])
else:
sm.AppendSeparator()
popupMenu.Append(mi_tup[0], mi_tup[1], sm)
else:
if mi_tup[3]:
sm = wx.Menu()
for menuItemName in mi_tup[3]:
menuItem = wx.MenuItem(sm, menuItemName[0], menuItemName[1])
if menuItemName[2]:
menuItem.SetBitmap(self.fileOperations.getImageBitmap(imageName=menuItemName[2]))
sm.Append(menuItem)
self.Bind(wx.EVT_MENU, lambda e:self.onRightClickMenu(e, file=path), id=menuItemName[0])
else:
sm.AppendSeparator()
popupMenu.Append(mi_tup[0], mi_tup[1], sm)
else:
mitem = wx.MenuItem(popupMenu, mi_tup[0], mi_tup[1])
if mi_tup[2] is not None:
mitem.SetBitmap(self.fileOperations.getImageBitmap(imageName=mi_tup[2]))
popupMenu.Append(mitem)
self.Bind(wx.EVT_MENU, self.onRightClickMenu, id=mi_tup[0])
else:
popupMenu.AppendSeparator()
# bmp = wx.ArtProvider.GetBitmap(str(mi_tup[2]), wx.ART_MENU)
# mitem.SetBitmap(bmp)
except Exception as e:
logger.error(e)
pass
self.PopupMenu(popupMenu)
# self.menu.Destroy()
#---- End FileTree Interface Methods ----#
def refreshNode(self):
for node in self.GetSelections():
path = self.GetPyData(node)
if os.path.isdir(path) and self.IsExpanded(node):
self.DoItemCollapsed(node)
self.DoItemExpanding(node)
def onRightClickMenu(self, event, file=None):
nodes = self.GetSelections()
file = [ self.GetPyData(node) for node in nodes ][0]
if os.path.isfile(file):
file = os.path.dirname(file)
logger.debug(f'onRightClickMenu: {event.Id}, file:{file} ')
if event.Id == ID_REFRESH_TREE_ITEM:
logger.debug('ID_REFRESH_TREE_ITEM')
self.refreshNode()
if event.Id == ID_NEW_PYTHON_PROJECT:
logger.debug('ID_NEW_PYTHON_PROJECT')
frame = NewProjectFrame(self, title='New Python Project')
frame.Show()
if event.Id == ID_PROJECT_PROPERTIES:
logger.debug('ID_PROJECT_PROPERTIES')
if event.Id == ID_CLOSE_PROJECT:
logger.debug('ID_CLOSE_PROJECT')
if event.Id == ID_DELETE_PROJECT:
logger.debug('ID_DELETE_PROJECT')
for node in self.GetSelections():
logger.debug(f'going to delete: {self.GetItemText(node)}')
path = self.GetPyData(node)
logger.info(f'deleting : {path} ')
name = self.GetItemText(node)
if not self.removeProject(name, path, node):
if path is not None and os.path.isdir(path):
logger.debug(f'shutil.rmtree: {path}')
# shutil.rmtree(path)
elif path and os.path.isfile(path):
logger.debug(f'os.remove:{path}')
os.remove(path)
self.Delete(node)
self.DoItemCollapsed(node)
self.DoItemExpanding(node)
# self.initProjects()
# self.RemoveWatchDirectory(dname)
if event.Id == ID_PYTHON_RUN:
logger.debug('ID_PYTHON_RUN')
if event.Id == ID_RENAME:
logger.debug('ID_RENAME')
self.onF2KeyPress(event)
if event.Id == ID_IMPORT:
logger.debug('ID_IMPORT')
if event.Id == ID_NEW_FILE:
logger.debug('ID_NEW_FILE')
self.newFileFlow(title='New File', file=file)
if event.Id == ID_NEW_FOLDER:
logger.debug('ID_NEW_FOLDER')
self.newFileFlow(title='New Folder', file=file)
# newFileframe = NewFileFrame(self, 'New Folder', selectedPath=file)
# newFileframe.CenterOnScreen()
# newFileframe.Show()
def removeProject(self, name, path, node):
isRemoveProject = False
for project in getWorkspace().projects:
if project.name == name and project.getProjectPath() == path:
self.Delete(node)
datasource = WorkspaceDatasource()
datasource.removeProject(projectName=name)
isRemoveProject = True
break
return isRemoveProject
def newFileFlow(self, title=None, file=None):
newFileframe = NewFileFrame(self, title, selectedPath=file)
newFileframe.CenterOnScreen()
newFileframe.Show()
def OpenFiles(self, filesWithImage=[]):
"""Open the list of files in Editra for editing
@param files: list of file names
"""
to_open = list()
for fileWithImage in filesWithImage:
fname = fileWithImage[0]
try:
res = os.stat(fname)[0]
# isRegularFile or IsDirectory
if stat.S_ISREG(res):
to_open.append(fileWithImage)
elif stat.S_ISDIR(res):
# TODO: need to think on it.
pass
except (IOError, OSError) as msg:
logger.debug("[filebrowser][err] %s" % str(msg))
# TODO : Need to work on it.
if hasattr(self.GetTopLevelParent(), '_mgr'):
for fileWithImage in to_open:
filePath = fileWithImage[0]
fileName = os.path.split(fileWithImage[0])[-1]
file_ext = fileName.split('.')[-1]
window = EditorWindowManager().getWindow(self, filePath)
# mainStc = MainStc(self, text=FileOperations().readFile(filePath=fileWithImage[0]))
# mainStc.SetFileName(filePath)
# mainStc.SetModTime(os.path.getmtime(filePath))
# # mainStc.SetText(FileOperations().readFile(filePath=fileWithImage[0]))
# mainStc.ConfigureLexer(file_ext)
# mainStc.SetModified(False)
# mainStc.SetSavePoint()
imageName = self.iconManager.getFileImageNameByExtension(file_ext)
(name, captionName) = self.getTitleString(window=window, path=fileWithImage[0])
icon = fileWithImage[1]
# imageName=self.iconsDictIndex[extensionName]
self.GetTopLevelParent()._mgr.addTabByWindow(window=window, icon=icon, imageName=imageName, name=f'{name}-{captionName}', captionName=name, tabDirection=5)
# centerPaneTab.window.addTab(name='openFileLoad'+fileName, worksheetPanel=stc)
# win = wx.GetApp().GetActiveWindow()
# if win:
# win.GetNotebook().OnDrop(to_open)
def getTitleString(self, window=None, path=None):
"""Get the title string to display in the MainWindows title bar
@return: (unicode) string
"""
# fname = self.GetFileName()
title = os.path.split(path)[-1]
# Its an unsaved buffer
if not len(title):
title = path = self.GetTabLabel()
if window.GetModify() and not title.startswith(u'*'):
title = u"*" + title
return title, path
def OnCompareItems(self, item1, item2):
"""Handle SortItems"""
data = self.GetPyData(item1)
if data is not None:
path1 = int(not os.path.isdir(data))
else:
path1 = 0
        tup1 = (path1, (data or "").lower())
data2 = self.GetPyData(item2)
if data2 is not None:
path2 = int(not os.path.isdir(data2))
else:
path2 = 0
        tup2 = (path2, (data2 or "").lower())
if tup1 < tup2:
return -1
elif tup1 == tup2:
return 0
else:
return 1
def OnFilesChanged(self, added, deleted, modified):
"""DirectoryMonitor callback - synchronize the view
with the filesystem.
@param added: list of paths added
@param deleted: list of paths removed
@param modified: list of paths modified
"""
nodes = self.GetExpandedNodes()
visible = list()
for node in nodes:
visible.extend(self.GetChildNodes(node))
# Remove any deleted file objects
for fobj in deleted:
for item in visible:
path = self.GetPyData(item)
if fobj.Path == path:
self.Delete(item)
visible.remove(item)
break
# Add any new file objects to the view
pathCache = dict()
needsort = list()
for fobj in added:
# apply filters to any new files
# if not self.ShouldDisplayFile(fobj.Path):
# continue
dpath = os.path.dirname(fobj.Path)
for item in nodes:
path = self.GetPyData(item)
if path == dpath:
# prevent duplicates from being added
if path not in pathCache:
pathCache[path] = self.GetNodePaths(item)
if fobj.Path in pathCache[path]:
continue
self.AppendFileNode(item, fobj.Path)
if item not in needsort:
needsort.append(item)
break
# Re-sort display
for item in needsort:
self.SortChildren(item)
def OnMenu(self, evt):
"""Handle the context menu events for performing
filesystem operations
"""
logger.debug(f'OnMenu{ evt.Id}')
e_id = evt.Id
# path = self._menu.GetUserData('active_node')
# paths = self._menu.GetUserData('selected_nodes')
def Opener(paths):
"""File opener job
@param paths: list of paths
"""
# for fname in paths:
# subprocess.call([FILEMAN_CMD, fname])
# time.sleep(.25)
#
# if e_id == ID_EDIT:
# self.OpenFiles(paths)
# elif e_id == ID_OPEN:
# ed_thread.EdThreadPool().QueueJob(Opener, paths)
# elif e_id == ID_REVEAL:
# dpaths = [os.path.dirname(fname) for fname in paths]
# dpaths = list(set(dpaths))
# ed_thread.EdThreadPool().QueueJob(Opener, dpaths)
# elif e_id == wx.ID_REFRESH:
# # Refresh the view
# self.RefreshView()
# elif e_id == ID_SEARCH_DIR:
# if len(paths):
# path = paths[0] # Go off of the first selected item
# if not os.path.isdir(path):
# path = os.path.dirname(path)
# mdata = dict(mainw=self._mw, lookin=path)
# ed_msg.PostMessage(ed_msg.EDMSG_FIND_SHOW_DLG, mdata)
# elif e_id == ID_GETINFO:
# last = None
# for fname in paths:
# info = ed_mdlg.EdFileInfoDlg(self.TopLevelParent, fname)
# if last is None:
# info.CenterOnParent()
# else:
# lpos = last.GetPosition()
# info.SetPosition((lpos[0] + 14, lpos[1] + 14))
# info.Show()
# last = info
# elif e_id == ID_RENAME:
# item = self._menu.GetUserData('item_id')
# self.EditLabel(item)
# elif e_id == ID_NEW_FOLDER:
# name = wx.GetTextFromUser(_("Enter folder name:"), _("New Folder"),
# parent=self.TopLevelParent)
# if name:
# dobjs = TakeSnapshots([path,])
# err, msg = ebmlib.MakeNewFolder(path, name)
# if not err:
# wx.MessageBox(msg, _("Failed to create folder"),
# style=wx.OK|wx.CENTER|wx.ICON_ERROR)
# else:
# self.RefreshView(dobjs)
# elif e_id == ID_NEW_FILE:
# name = wx.GetTextFromUser(_("Enter file name:"), _("New File"),
# parent=self.TopLevelParent)
# if name:
# dobjs = TakeSnapshots([path,])
# err, msg = ebmlib.MakeNewFile(path, name)
# if not err:
# wx.MessageBox(msg, _("Failed to create file"),
# style=wx.OK|wx.CENTER|wx.ICON_ERROR)
# else:
# self.RefreshView(dobjs)
# elif e_id == ID_DUPLICATE:
# dobjs = TakeSnapshots(paths)
# for fname in paths:
# DuplicatePath(fname)
# self.RefreshView(dobjs)
# elif e_id == ID_ARCHIVE:
# dobjs = TakeSnapshots([path,])
# MakeArchive(path)
# self.RefreshView(dobjs)
# elif e_id == ID_DELETE:
# dobjs = TakeSnapshots(paths)
# MoveToTrash(paths)
# self.RefreshView(dobjs)
# else:
# evt.Skip()
# return
def OnThemeChanged(self, msg):
"""Update the icons when the icon theme has changed
@param msg: Message Object
"""
self.iconManager.RefreshImageList(self.ImageList)
def OnConfig(self, msg):
"""Handle updates for filebrowser preference updates"""
# TODO: refresh tree for hidden files on/off
pass
# @ed_msg.mwcontext
def OnPageClosing(self, msg):
self.isClosing = True
# @ed_msg.mwcontext
def OnPageChange(self, msg):
"""Synchronize selection with the notebook page changes
@param msg: MessageObject
@todo: check if message is from a page closing and avoid updates
"""
if self.isClosing:
self.isClosing = False
return
# if not fbcfg.GetFBOption(fbcfg.FB_SYNC_OPT, True):
# return
nbdata = msg.GetData()
if not nbdata[0]:
return
pg_count = nbdata[0].GetPageCount()
if nbdata[1] > pg_count or nbdata[1] < 0:
# Page | |
0.00000000051 * mu.cost(0.11390643300 + 17402.33328172660 * x)
L1 += 0.00000000065 * mu.cost(1.03099992649 + 4106.40549114620 * x)
L1 += 0.00000000060 * mu.cost(1.00159365247 + 151.89728108520 * x)
L1 += 0.00000000050 * mu.cost(3.84651247899 + 45494.58142974879 * x)
L1 += 0.00000000047 * mu.cost(3.03959709242 + 5408.54382777240 * x)
L1 += 0.00000000063 * mu.cost(4.16165369755 + 8186.51266249260 * x)
L1 += 0.00000000046 * mu.cost(2.69368087378 + 16547.64173406480 * x)
L1 += 0.00000000051 * mu.cost(2.99576014378 + 3774.32416457660 * x)
L1 += 0.00000000044 * mu.cost(2.00664763411 + 6418.14093002680 * x)
L1 += 0.00000000045 * mu.cost(4.01853755929 + 19406.67828817460 * x)
L1 += 0.00000000058 * mu.cost(3.14474753550 + 4025.65648092580 * x)
L1 += 0.00000000050 * mu.cost(2.59881540437 + 6621.85099148600 * x)
L1 += 0.00000000043 * mu.cost(4.87912487459 + 6414.61781167780 * x)
L1 += 0.00000000042 * mu.cost(5.20400092044 + 4447.75123238460 * x)
L1 += 0.00000000051 * mu.cost(1.99634375899 + 5032.77809620220 * x)
L1 += 0.00000000043 * mu.cost(1.28813888865 + 6643.09181776180 * x)
L1 += 0.00000000040 * mu.cost(0.96801618560 + 14591.41182012140 * x)
L1 += 0.00000000039 * mu.cost(1.84985100829 + 10001.48196070061 * x)
L1 += 0.00000000039 * mu.cost(5.69967200167 + 6106.88005506480 * x)
L1 += 0.00000000038 * mu.cost(3.27498743518 + 18052.92954315780 * x)
L1 += 0.00000000039 * mu.cost(2.84167905068 + 6652.77566593180 * x)
L1 += 0.00000000044 * mu.cost(0.57891618854 + 16865.52876963120 * x)
L1 += 0.00000000043 * mu.cost(4.61937364869 + 3341.03250279340 * x)
L1 += 0.00000000042 * mu.cost(6.02555835659 + 6691.86151874940 * x)
L1 += 0.00000000034 * mu.cost(4.97734992350 + 6670.58818804980 * x)
L1 += 0.00000000033 * mu.cost(1.39167727215 + 4825.54491639400 * x)
L1 += 0.00000000035 * mu.cost(6.02955363644 + 3568.08855948880 * x)
L1 += 0.00000000035 * mu.cost(0.31961016732 + 6645.19698672220 * x)
L1 += 0.00000000032 * mu.cost(5.63043769073 + 3511.28529731900 * x)
L1 += 0.00000000031 * mu.cost(5.42978464210 + 9945.57120882380 * x)
L1 += 0.00000000038 * mu.cost(5.66461657503 + 3416.87849797540 * x)
L1 += 0.00000000030 * mu.cost(0.98518793666 + 20426.57109242200 * x)
L1 += 0.00000000038 * mu.cost(0.12870962242 + 6604.95878212400 * x)
L1 += 0.00000000037 * mu.cost(5.48374357342 + 3311.18291816379 * x)
L1 += 0.00000000032 * mu.cost(6.11106979810 + 4392.88080988820 * x)
L1 += 0.00000000031 * mu.cost(3.18481282781 + 3341.04230982650 * x)
L1 += 0.00000000034 * mu.cost(2.32358226279 + 9072.66167112960 * x)
L1 += 0.00000000039 * mu.cost(4.11042361929 + 3312.16323923200 * x)
L1 += 0.00000000027 * mu.cost(0.57810321636 + 3391.89276456221 * x)
L1 += 0.00000000029 * mu.cost(2.48646403166 + 9815.65173166220 * x)
L1 += 0.00000000031 * mu.cost(0.44265747667 + 3451.79906898740 * x)
L1 += 0.00000000027 * mu.cost(6.13498177783 + 3362.46325602620 * x)
L1 += 0.00000000027 * mu.cost(6.21846173482 + 5223.69391980220 * x)
L1 += 0.00000000027 * mu.cost(2.94945830517 + 7203.80227149340 * x)
L1 += 0.00000000027 * mu.cost(3.26179855800 + 8756.26980147300 * x)
L1 += 0.00000000027 * mu.cost(3.94385271700 + 23958.63178523340 * x)
L1 += 0.00000000033 * mu.cost(3.77237326006 + 12808.88030395680 * x)
L1 += 0.00000000030 * mu.cost(4.75096367323 + 15906.76412668260 * x)
L1 += 0.00000000031 * mu.cost(0.88248871193 + 3340.18254357310 * x)
L1 += 0.00000000025 * mu.cost(0.31303295413 + 6571.01853218020 * x)
L1 += 0.00000000031 * mu.cost(4.29076841627 + 10020.85695903120 * x)
L1 += 0.00000000026 * mu.cost(2.22427360058 + 10050.28646756720 * x)
L1 += 0.00000000025 * mu.cost(0.67881122439 + 23937.85638974100 * x)
L1 += 0.00000000031 * mu.cost(1.72899093511 + 13745.34623902240 * x)
L1 += 0.00000000024 * mu.cost(0.20355912395 + 3229.42578441220 * x)
L1 += 0.00000000032 * mu.cost(3.37195631109 + 2284.75361485960 * x)
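    # L2 series below: same structure as the L1 terms above, where each line accumulates
    # a periodic term of the form amplitude * mu.cost(phase + rate * x).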
L2: float = 0
L2 += 0.00058015791 * mu.cost(2.04979463279 + 3340.61242669980 * x)
L2 += 0.00054187645
L2 += 0.00013908426 * mu.cost(2.45742359888 + 6681.22485339960 * x)
L2 += 0.00002465104 * mu.cost(2.80000020929 + 10021.83728009940 * x)
L2 += 0.00000398379 * mu.cost(3.14118428289 + 13362.44970679920 * x)
L2 += 0.00000222022 * mu.cost(3.19436080019 + 3.52311834900 * x)
L2 += 0.00000120957 * mu.cost(0.54325292454 + 155.42039943420 * x)
L2 += 0.00000061517 * mu.cost(3.48529427371 + 16703.06213349900 * x)
L2 += 0.00000053638 * mu.cost(3.54191121461 + 3344.13554504880 * x)
L2 += 0.00000034268 * mu.cost(6.00188499119 + 2281.23049651060 * x)
L2 += 0.00000031665 * mu.cost(4.14015171788 + 191.44826611160 * x)
L2 += 0.00000029839 * mu.cost(1.99870679845 + 796.29800681640 * x)
L2 += 0.00000023168 * mu.cost(4.33403365928 + 242.72860397400 * x)
L2 += 0.00000021659 * mu.cost(3.44532466378 + 398.14900340820 * x)
L2 += 0.00000016044 * mu.cost(6.11000472441 + 2146.16541647520 * x)
L2 += 0.00000020370 * mu.cost(5.42191375400 + 553.56940284240 * x)
L2 += 0.00000014927 * mu.cost(6.09541783564 + 3185.19202726560 * x)
L2 += 0.00000016227 * mu.cost(0.65678953303 + 0.98032106820 * x)
L2 += 0.00000014317 * mu.cost(2.61851897591 + 1349.86740965880 * x)
L2 += 0.00000014416 * mu.cost(4.01923812101 + 951.71840625060 * x)
L2 += 0.00000011934 * mu.cost(3.86122163021 + 6684.74797174860 * x)
L2 += 0.00000015648 * mu.cost(1.22086121940 + 1748.01641306700 * x)
L2 += 0.00000011260 * mu.cost(4.71822363671 + 2544.31441988340 * x)
L2 += 0.00000013352 * mu.cost(0.60189008414 + 1194.44701022460 * x)
L2 += 0.00000010396 * mu.cost(0.25038714677 + 382.89653222320 * x)
L2 += 0.00000009468 * mu.cost(0.68170713564 + 1059.38193018920 * x)
L2 += 0.00000009229 * mu.cost(3.83209092321 + 20043.67456019880 * x)
L2 += 0.00000009005 * mu.cost(3.88271826102 + 3738.76143010800 * x)
L2 += 0.00000007501 * mu.cost(5.46498630412 + 1751.53953141600 * x)
L2 += 0.00000006497 * mu.cost(5.47773072872 + 1592.59601363280 * x)
L2 += 0.00000006311 * mu.cost(2.34104793674 + 3097.88382272579 * x)
L2 += 0.00000006859 * mu.cost(2.57522504136 + 3149.16416058820 * x)
L2 += 0.00000005870 * mu.cost(1.14783576679 + 7.11354700080 * x)
L2 += 0.00000006681 * mu.cost(2.37843690339 + 4136.91043351620 * x)
L2 += 0.00000004647 * mu.cost(4.42957708526 + 6151.53388830500 * x)
L2 += 0.00000004166 * mu.cost(3.68631477611 + 5614.72937620960 * x)
L2 += 0.00000004764 * mu.cost(2.89684755585 + 3333.49887969900 * x)
L2 += 0.00000004045 * mu.cost(6.12493402657 + 5628.95647021120 * x)
L2 += 0.00000003653 * mu.cost(4.06679068397 + 1990.74501704100 * x)
L2 += 0.00000003618 * mu.cost(2.46868561769 + 529.69096509460 * x)
L2 += 0.00000003277 * mu.cost(0.68101740787 + 8962.45534991020 * x)
L2 += 0.00000003253 * mu.cost(2.79565340390 + 3894.18182954220 * x)
L2 += 0.00000003091 * mu.cost(4.56861203364 + 3496.03282613400 * x)
L2 += 0.00000002921 * mu.cost(5.41458945995 + 2914.01423582380 * x)
L2 += 0.00000002921 * mu.cost(1.23050883841 + 2787.04302385740 * x)
L2 += 0.00000002784 * mu.cost(1.38911141844 + 4292.33083295040 * x)
L2 += 0.00000002620 * mu.cost(1.04061894134 + 3341.59274776800 * x)
L2 += 0.00000002888 * mu.cost(3.41062353663 + 3337.08930835080 * x)
L2 += 0.00000002418 * mu.cost(0.96341462666 + 4535.05943692440 * x)
L2 += 0.00000002357 * mu.cost(4.84628239765 + 9492.14631500480 * x)
L2 += 0.00000002593 * mu.cost(5.74934234498 + 3340.59517304760 * x)
L2 += 0.00000002191 * mu.cost(3.26449527357 + 213.29909543800 * x)
L2 += 0.00000002594 * mu.cost(1.49510566123 + 3340.62968035200 * x)
L2 += 0.00000002344 * mu.cost(4.18104725028 + 10025.36039844840 * x)
L2 += 0.00000002630 * mu.cost(4.67640929857 + 3583.34103067380 * x)
L2 += 0.00000002602 * mu.cost(2.64911714813 + 2388.89402044920 * x)
L2 += 0.00000001830 * mu.cost(0.97181050149 + 1589.07289528380 * x)
L2 += 0.00000002416 * mu.cost(1.04749173375 + 4399.99435688900 * x)
L2 += 0.00000002386 * mu.cost(4.27072575550 + 7079.37385680780 * x)
L2 += 0.00000002187 * mu.cost(0.16036551231 + 6525.80445396540 * x)
L2 += 0.00000002344 * mu.cost(0.01425578204 + 4690.47983635860 * x)
L2 += 0.00000001617 * mu.cost(4.95614491689 + 5088.62883976680 * x)
L2 += 0.00000001633 * mu.cost(1.10703599922 + 12303.06777661000 * x)
L2 += 0.00000002126 * mu.cost(0.48290227706 + 2700.71514038580 * x)
L2 += 0.00000001629 * mu.cost(4.94267977718 + 1221.84856632140 * x)
L2 += 0.00000001504 * mu.cost(0.11031912519 + 2957.71589447660 * x)
L2 += 0.00000001759 * mu.cost(3.81170701376 + 3723.50895892300 * x)
L2 += 0.00000001401 * mu.cost(3.85907867678 + 6283.07584999140 * x)
L2 += 0.00000001338 * mu.cost(5.29685392418 + 6677.70173505060 * x)
L2 += 0.00000001763 * mu.cost(2.51660121293 + 2810.92146160520 * x)
L2 += 0.00000001392 * mu.cost(2.73498041122 + 7477.52286021600 * x)
L2 += 0.00000001431 * mu.cost(2.97747408389 + 6489.77658728800 * x)
L2 += 0.00000001236 * mu.cost(3.77245965590 + 2699.73481931760 * x)
L2 += 0.00000001234 * mu.cost(1.88931735265 + 6681.24210705180 * x)
L2 += 0.00000001513 * mu.cost(2.92614134711 + 640.87760738220 * x)
L2 += 0.00000001234 * mu.cost(6.14168429036 + 6681.20759974740 * x)
L2 += 0.00000001408 * mu.cost(1.54395721611 + 3347.72597370060 * x)
L2 += 0.00000001038 * mu.cost(5.82880072933 + 4933.20844033260 * x)
L2 += 0.00000001156 * mu.cost(1.50825464304 + 426.59819087600 * x)
L2 += 0.00000001362 * mu.cost(4.17794297520 + 23384.28698689860 * x)
L2 += 0.00000001135 * mu.cost(3.77506455273 + 3870.30339179440 * x)
L2 += 0.00000000916 * mu.cost(3.81726339298 + 5092.15195811580 * x)
L2 += 0.00000000853 * mu.cost(3.82520490669 + 3340.54511639700 * x)
L2 += 0.00000001077 * mu.cost(5.05062828760 + 5621.84292321040 * x)
L2 += 0.00000001074 * mu.cost(3.81446920470 + 3553.91152213780 * x)
L2 += 0.00000000847 * mu.cost(3.41702696402 + 3340.67973700260 * x)
L2 += 0.00000000920 * mu.cost(1.91108056416 + | |
{"Human": None}, {"Human": {"id": {"new_id"}}}
)
self.assertEqual(
expected_error_message,
str(e.exception),
)
def test_field_renaming_in_interfaces(self) -> None:
with self.assertRaises(NotImplementedError):
rename_schema(
parse(ISS.multiple_interfaces_schema), {}, {"Character": {"id": {"new_id"}}}
)
with self.assertRaises(NotImplementedError):
rename_schema(
parse(ISS.multiple_interfaces_schema), {}, {"Character": {"id": {"id", "new_id"}}}
)
with self.assertRaises(NotImplementedError):
rename_schema(parse(ISS.multiple_interfaces_schema), {}, {"Character": {"id": set()}})
with self.assertRaises(NotImplementedError):
# Cannot rename Human's fields because Human implements an interface and field_renamings
# for object types that implement interfaces isn't supported yet.
rename_schema(parse(ISS.multiple_interfaces_schema), {}, {"Human": {"id": {"new_id"}}})
with self.assertRaises(NotImplementedError):
rename_schema(
parse(ISS.multiple_interfaces_schema), {}, {"Human": {"id": {"id", "new_id"}}}
)
with self.assertRaises(NotImplementedError):
rename_schema(parse(ISS.multiple_interfaces_schema), {}, {"Human": {"id": set()}})
def test_enum_rename(self) -> None:
renamed_schema = rename_schema(
parse(ISS.enum_schema), {"Droid": "NewDroid", "Height": "NewHeight"}, {}
)
renamed_schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
type NewDroid {
height: NewHeight
}
type SchemaQuery {
NewDroid: NewDroid
}
enum NewHeight {
TALL
SHORT
}
"""
)
compare_schema_texts_order_independently(
self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
)
self.assertEqual(
{"NewDroid": "Droid", "NewHeight": "Height"}, renamed_schema.reverse_name_map
)
self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_enum_suppression(self) -> None:
with self.assertRaises(NotImplementedError):
rename_schema(parse(ISS.multiple_enums_schema), {"Size": None}, {})
def test_interface_rename(self) -> None:
renamed_schema = rename_schema(
parse(ISS.interface_schema), {"Kid": "NewKid", "Character": "NewCharacter"}, {}
)
renamed_schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
interface NewCharacter {
id: String
}
type NewKid implements NewCharacter {
id: String
}
type SchemaQuery {
NewCharacter: NewCharacter
NewKid: NewKid
}
"""
)
compare_schema_texts_order_independently(
self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
)
self.assertEqual(
{"NewKid": "Kid", "NewCharacter": "Character"}, renamed_schema.reverse_name_map
)
self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_suppress_interface_implementation(self) -> None:
with self.assertRaises(NotImplementedError):
rename_schema(parse(ISS.various_types_schema), {"Giraffe": None}, {})
def test_suppress_all_implementations_but_not_interface(self) -> None:
with self.assertRaises(NotImplementedError):
rename_schema(parse(ISS.various_types_schema), {"Giraffe": None, "Human": None}, {})
def test_suppress_interface_but_not_implementations(self) -> None:
with self.assertRaises(NotImplementedError):
rename_schema(parse(ISS.various_types_schema), {"Character": None}, {})
def test_suppress_interface_and_all_implementations(self) -> None:
with self.assertRaises(NotImplementedError):
rename_schema(
parse(ISS.various_types_schema),
{"Giraffe": None, "Character": None, "Human": None},
{},
)
def test_multiple_interfaces_rename(self) -> None:
renamed_schema = rename_schema(
parse(ISS.multiple_interfaces_schema),
{"Human": "NewHuman", "Character": "NewCharacter", "Creature": "NewCreature"},
{},
)
renamed_schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
interface NewCharacter {
id: String
}
interface NewCreature {
age: Int
}
type NewHuman implements NewCharacter & NewCreature {
id: String
age: Int
}
type SchemaQuery {
NewCharacter: NewCharacter
NewCreature: NewCreature
NewHuman: NewHuman
}
"""
)
compare_schema_texts_order_independently(
self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
)
self.assertEqual(
{"NewHuman": "Human", "NewCharacter": "Character", "NewCreature": "Creature"},
renamed_schema.reverse_name_map,
)
self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_scalar_rename(self) -> None:
renamed_schema = rename_schema(parse(ISS.scalar_schema), {"Date": "NewDate"}, {})
renamed_schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
directive @stitch(source_field: String!, sink_field: String!) on FIELD_DEFINITION
type Human {
id: String
birthday: NewDate
}
scalar NewDate
type SchemaQuery {
Human: Human
}
"""
)
compare_schema_texts_order_independently(
self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
)
self.assertEqual(
{"NewDate": "Date"},
renamed_schema.reverse_name_map,
)
self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_scalar_suppress(self) -> None:
renamed_schema = rename_schema(
parse(ISS.scalar_schema), {"Date": None}, {"Human": {"birthday": set()}}
)
renamed_schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
directive @stitch(source_field: String!, sink_field: String!) on FIELD_DEFINITION
type Human {
id: String
}
type SchemaQuery {
Human: Human
}
"""
)
compare_schema_texts_order_independently(
self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
)
self.assertEqual(
{},
renamed_schema.reverse_name_map,
)
self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_builtin_rename(self) -> None:
with self.assertRaises(NotImplementedError):
rename_schema(parse(ISS.list_schema), {"String": "NewString"}, {})
def test_union_rename(self) -> None:
renamed_schema = rename_schema(
parse(ISS.union_schema), {"HumanOrDroid": "NewHumanOrDroid", "Droid": "NewDroid"}, {}
)
renamed_schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
type Human {
id: String
}
type NewDroid {
id: String
}
union NewHumanOrDroid = Human | NewDroid
type SchemaQuery {
Human: Human
NewDroid: NewDroid
}
"""
)
compare_schema_texts_order_independently(
self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
)
self.assertEqual(
{"NewDroid": "Droid", "NewHumanOrDroid": "HumanOrDroid"},
renamed_schema.reverse_name_map,
)
self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_entire_union_suppress(self) -> None:
renamed_schema = rename_schema(
parse(ISS.union_schema), {"HumanOrDroid": None, "Droid": "NewDroid"}, {}
)
renamed_schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
type Human {
id: String
}
type NewDroid {
id: String
}
type SchemaQuery {
Human: Human
NewDroid: NewDroid
}
"""
)
compare_schema_texts_order_independently(
self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
)
self.assertEqual(
{"NewDroid": "Droid"},
renamed_schema.reverse_name_map,
)
self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_union_member_suppress(self) -> None:
renamed_schema = rename_schema(parse(ISS.union_schema), {"Droid": None}, {})
renamed_schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
type Human {
id: String
}
union HumanOrDroid = Human
type SchemaQuery {
Human: Human
}
"""
)
compare_schema_texts_order_independently(
self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
)
self.assertEqual(
{},
renamed_schema.reverse_name_map,
)
self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_list_rename(self) -> None:
renamed_schema = rename_schema(
parse(ISS.list_schema),
{
"Droid": "NewDroid",
"Character": "NewCharacter",
"Height": "NewHeight",
},
{},
)
renamed_schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
type NewDroid implements NewCharacter {
id: String
heights: [NewHeight]
dates: [Date]
friends: [NewDroid]
enemies: [NewCharacter]
}
type SchemaQuery {
NewDroid: [NewDroid]
}
scalar Date
interface NewCharacter {
id: String
}
enum NewHeight {
TALL
SHORT
}
"""
)
compare_schema_texts_order_independently(
self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
)
self.assertEqual(
{
"NewCharacter": "Character",
"NewDroid": "Droid",
"NewHeight": "Height",
},
renamed_schema.reverse_name_map,
)
self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_non_null_rename(self) -> None:
renamed_schema = rename_schema(parse(ISS.non_null_schema), {"Dog": "NewDog"}, {})
renamed_schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
type NewDog {
id: String!
friend: NewDog!
}
type Cat {
id: String
}
type SchemaQuery {
NewDog: NewDog!
Cat: Cat
}
"""
)
compare_schema_texts_order_independently(
self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
)
self.assertEqual({"NewDog": "Dog"}, renamed_schema.reverse_name_map)
self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_non_null_suppress(self) -> None:
renamed_schema = rename_schema(parse(ISS.non_null_schema), {"Dog": None}, {})
renamed_schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
type Cat {
id: String
}
type SchemaQuery {
Cat: Cat
}
"""
)
compare_schema_texts_order_independently(
self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
)
self.assertEqual({}, renamed_schema.reverse_name_map)
self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_directive_renaming_illegal_noop(self) -> None:
# This renaming is illegal because directives can't be renamed, so the
# "stitch" -> "NewStitch" mapping is a no-op
with self.assertRaises(NoOpRenamingError):
rename_schema(
parse(ISS.directive_schema),
{
"stitch": "NewStitch",
},
{},
)
def test_query_type_field_argument_illegal_noop(self) -> None:
# This renaming is illegal because query type field arguments can't be renamed, so the
# "id" -> "Id" mapping is a no-op
schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
type SchemaQuery {
Human(id: String!): Human
}
type Human {
name: String
}
"""
)
with self.assertRaises(NoOpRenamingError):
rename_schema(parse(schema_string), {"id": "Id"}, {})
def test_clashing_type_rename(self) -> None:
schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
type Human1 {
id: String
}
type Human2 {
id: String
}
type SchemaQuery {
Human1: Human1
Human2: Human2
}
"""
)
with self.assertRaises(SchemaRenameNameConflictError):
rename_schema(parse(schema_string), {"Human1": "Human", "Human2": "Human"}, {})
def test_clashing_type_single_rename(self) -> None:
schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
type Human {
id: String
}
type Human2 {
id: String
}
type SchemaQuery {
Human: Human
Human2: Human2
}
"""
)
with self.assertRaises(SchemaRenameNameConflictError):
rename_schema(parse(schema_string), {"Human2": "Human"}, {})
def test_clashing_scalar_type_rename(self) -> None:
schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
type Human {
id: String
}
scalar SCALAR
type SchemaQuery {
Human: Human
}
"""
)
with self.assertRaises(SchemaRenameNameConflictError):
rename_schema(parse(schema_string), {"Human": "SCALAR"}, {})
def test_builtin_type_conflict_rename(self) -> None:
schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
type Human {
id: String
}
type SchemaQuery {
Human: Human
}
"""
)
with self.assertRaises(SchemaRenameNameConflictError):
rename_schema(parse(schema_string), {"Human": "String"}, {})
def test_multiple_naming_conflicts(self) -> None:
schema_string = dedent(
"""\
schema {
query: SchemaQuery
}
type Dog {
nickname: String
age: Int
}
type Cat {
nickname: String
age: Int
}
type Droid {
id: String
friends: [Droid]
}
type Human {
id: String
name: String
age: Int
}
type SchemaQuery {
Dog: Dog
Cat: Cat
Droid: Droid
Human: Human
}
"""
)
clashing_type_rename_error_message = (
"Applying the renaming would produce a schema in which multiple types have the "
"same name, which is an illegal schema state. To fix this, modify the type_renamings "
"argument of rename_schema to ensure that no two types in the renamed schema have "
"the same name. The following is a list of tuples that describes what needs to be "
"fixed. Each tuple is of the form (new_type_name, original_schema_type_names) "
"where new_type_name is the type name that would appear in the new schema and "
"original_schema_type_names is a list of types in the original schema that get "
"mapped to new_type_name: [('Droid', ['Dog', 'Droid', 'Human'])]"
)
type_rename_to_builtin_error_message = (
"Applying the renaming would rename type(s) to a name already used by a built-in "
"GraphQL scalar type. To fix this, ensure that no type name is mapped to a "
"scalar's name. The following is a list of tuples that describes what needs to be "
"fixed. Each tuple is of the form (type_name, | |
#region Information
#endregion
#region Module Imports
import bpy
import bmesh
import random
from mathutils import Vector, Matrix
from math import sqrt
from bpy.props import *
from bpy.types import (Panel,Menu,Operator,PropertyGroup)
from .. modules.spatial import measure
#endregion
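# Note: measure(a, b) from modules.spatial is assumed to return the distance between
# two Vectors; the operators below use it for the radius and neighbour-threshold checks.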
#region Operators
class BYGEN_OT_Scatter_City_Circular(bpy.types.Operator):
bl_idname = "object.bygen_scatter_city_circular"
bl_label = "City Scatter - Circular"
bl_description = "Scatters objects like a city"
bl_options = {'REGISTER', 'UNDO'}
# Operator Properties
seed_value : IntProperty(
name = "Seed",
description = "Seed for randomisation",
default = 1,
min = 1,
max = 1000000
)
rad : FloatProperty(
name = "Radius",
description = "Radius of the city scattering",
default = 20.0,
min = 1.0,
max = 100000
)
maxb : IntProperty(
name = "Max Buildings",
description = "Maximum number of building objects",
default = 200,
min = 1,
max = 100000
)
collection_large : StringProperty(
name = "Collection Large",
description = "The collection to choose an object from",
default = "Buildings_Large"
)
rad_large : FloatProperty(
name = "Large Radius",
description = "Boundary for large objects",
default = 4.0,
min = 0.0,
max = 100000
)
collection_medium : StringProperty(
name = "Collection - Medium",
description = "Medium sized objects to choose from",
default = "Buildings_Medium"
)
rad_medium : FloatProperty(
name = "Medium Radius",
description = "Boundary for medium objects",
default = 13,
min = 0.0,
max = 100000
)
collection_small : StringProperty(
name = "Collection - Small",
description = "Small sized objects to choose from",
default = "Buildings_Small"
)
rad_small : FloatProperty(
name = "Small Radius",
description = "Boundary for small objects",
default = 15.0,
min = 0.0,
max = 100000
)
rotation_variation : BoolProperty(
name = "Rotation Variation",
description = "Allow variation in the rotation of objects",
default = True
)
threshold : FloatProperty(
name = "Neighbour Threshold",
description = "Minimum distance between neighbouring buildings",
default = 2,
min = 0.0,
max = 10000
)
max_cycles : IntProperty(
name = "Max Cycles",
description = "Maximum number of loop times to prevent infinite loops",
default = 1000,
min = 5,
max = 100000
)
def execute(self, context):
# Setting up scene context
scene = context.scene
# Preparing Random Seed
random.seed(self.seed_value)
# Initializing number_of_buildings
number_of_buildings = 0
# Initializing number_of_cycles
number_of_cycles = 0
# Preparing common radians
r90 = 1.570796
r180 = 3.141593
# Initializing the cellList
cellList = []
# Get the collection references
# Large Objects
objCollectionLarge = None
if self.collection_large in bpy.data.collections:
objCollectionLarge = bpy.data.collections[self.collection_large]
        # Medium Objects
objCollectionMedium = None
if self.collection_medium in bpy.data.collections:
objCollectionMedium = bpy.data.collections[self.collection_medium]
# Small Objects
objCollectionSmall = None
if self.collection_small in bpy.data.collections:
objCollectionSmall = bpy.data.collections[self.collection_small]
# Look for generation result collection
resultCollection = None
if "Generation Result" in bpy.data.collections:
resultCollection = bpy.data.collections["Generation Result"]
# Clear anything in generation result
if len(resultCollection.objects)>0:
if len(bpy.context.selected_objects)>0:
for so in bpy.context.selected_objects:
so.select_set(False)
for oldObj in resultCollection.objects:
oldObj.select_set(True)
bpy.ops.object.delete()
else:
bpy.data.collections.new("Generation Result")
resultCollection = bpy.data.collections["Generation Result"]
bpy.context.scene.collection.children.link(resultCollection)
# Begin generation procedure
# While number of generated buildings is less than max:
while (number_of_buildings < self.maxb) and (number_of_cycles < self.max_cycles):
            # Immediately increment number_of_cycles
number_of_cycles+=1
# Get random Vector on XY plane around origin:
posx = random.uniform(-self.rad,self.rad)
posy = random.uniform(-self.rad,self.rad)
newpos = Vector((posx,posy,0.0))
origin = Vector((0.0,0.0,0.0))
# If new vector is within radius:
if measure(newpos,origin) <= self.rad:
# Good position, check existing entries
canCreate = False
# If buildings already placed:
if len(cellList) > 0:
# Assume distance is clear until proven wrong:
distanceClear = True
# For every old building:
for cell in cellList:
# If distance between new vector and old building large enough:
if measure(newpos, cell) > self.threshold:
# Distance is clear, leave boolean true
pass
else:
# Distance not clear, make boolean false
distanceClear = False
# Considering clear distance and changing canCreate
if distanceClear == True:
canCreate = True
if distanceClear == False:
canCreate = False
else:
# Nothing in cell list, allow first object:
canCreate = True
# If we are allowed to create object:
if canCreate:
# Create Object here
# Deciding which collection to take an object from:
vecdist = measure(newpos, origin)
objtype = 0
objCollection = objCollectionLarge
if vecdist < self.rad_large:
objCollection = objCollectionLarge
if vecdist > self.rad_large and vecdist < self.rad_medium:
objCollection = objCollectionMedium
if vecdist > self.rad_medium:# and vecdist < self.rad_small:
objCollection = objCollectionSmall
# Prep and future prefix check:
object_names = []
if objCollection is not None:
for obj_ref in objCollection.objects:
if 'prefix' not in obj_ref.name:
object_names.append(obj_ref.name)
# If the number of objects in collection is more than 0:
if len(object_names) > 0:
# Selecting the original object to duplicate:
randID = random.randint(0,len(object_names)-1)
# Creating the new object:
new_obj = objCollection.objects[object_names[randID]]
new_object = bpy.data.objects.new(name=new_obj.name, object_data=new_obj.data)
# Linking new object to result collection:
resultCollection.objects.link(new_object)
# Checking viewport and render visibility:
new_object.hide_viewport = False
new_object.hide_render = False
# Moving the new object to good vector:
new_object.location = newpos
# Rotating the new object:
if self.rotation_variation:
rotObj = random.randint(0,2)
if rotObj == 1:
rotChoice = random.randint(0,3)
if rotChoice == 0:
# 90+
old_euler = new_object.rotation_euler
old_euler[2] += r90
new_object.rotation_euler = old_euler
if rotChoice == 1:
# 90-
old_euler = new_object.rotation_euler
old_euler[2] -= r90
new_object.rotation_euler = old_euler
if rotChoice == 2:
# 180
old_euler = new_object.rotation_euler
old_euler[2] += r180
new_object.rotation_euler = old_euler
# Add new Vector to cellList
cellList.append(newpos)
# Add to the number_of_buildings value:
number_of_buildings+=1
# Completed building creation
else:
# Not a good position, re-roll cycle
pass
return {'FINISHED'}
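# Minimal usage sketch (assumes the add-on is registered and collections named
# "Buildings_Large", "Buildings_Medium" and "Buildings_Small" exist in the blend file):
#   bpy.ops.object.bygen_scatter_city_circular(seed_value=2, rad=30.0, maxb=150)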
# Rectangular City Scatter Method:
class BYGEN_OT_Scatter_City_Rectangular(bpy.types.Operator):
bl_idname = "object.bygen_scatter_city_rectangular"
bl_label = "City Scatter - Rectangular"
bl_description = "Scatters objects like a city"
bl_options = {'REGISTER', 'UNDO'}
# Operator Properties:
seed_value : IntProperty(
name = "Seed",
description = "Seed for randomisation",
default = 1,
min = 1,
max = 1000000
)
collection_name : StringProperty(
name = "Collection Name",
description = "The collection to choose an object from",
default = "Buildings"
)
random_placement : BoolProperty(
name = "Random Placement",
description = "Randomize whether a building will be placed",
default = False
)
rotation_variation : BoolProperty(
name = "Rotation Variation",
description = "Allow variation in the rotation of objects",
default = True
)
city_offset : FloatProperty(
name = "City Offset",
description = "Position offset for the origin of the city",
default = 10
)
x_size : IntProperty(
name = "X-Size",
description = "Size of grid X axis",
default = 10,
min = 1,
max = 1000000
)
y_size : IntProperty(
name = "Y-Size",
description = "Size of grid Y axis",
default = 10,
min = 1,
max = 1000000
)
cell_size : IntProperty(
name = "Cell Size",
description = "Size of a grid cell",
default = 2,
min = 1,
max = 1000000
)
def execute(self, context):
# Setting up context
scene = context.scene
# Preparing Random Seed
random.seed(self.seed_value)
# Pseudo procedure:
# 1 - Get 2D cell grid
cellList = []
x_count = self.x_size
y_count = self.y_size
# Preparing radian vars for angle rotations
r90 = 1.570796
r180 = 3.141593
# Get the collection reference
objCollection = None
if self.collection_name in bpy.data.collections:
objCollection = bpy.data.collections[self.collection_name]
resultCollection = None
if "Generation Result" in bpy.data.collections:
resultCollection = bpy.data.collections["Generation Result"]
# Clear anything in generation result
if len(resultCollection.objects)>0:
if len(bpy.context.selected_objects)>0:
for so in bpy.context.selected_objects:
so.select_set(False)
for oldObj in resultCollection.objects:
oldObj.select_set(True)
bpy.ops.object.delete()
else:
bpy.data.collections.new("Generation Result")
resultCollection = bpy.data.collections["Generation Result"]
bpy.context.scene.collection.children.link(resultCollection)
# Loop variables
column_index = 0
curX = (0-self.cell_size) - self.city_offset
curY = (0-self.cell_size)
currentCell = Vector((0,0,0))
# Look through X axis (columns)
while column_index < x_count:
# Assessing columns
curX += self.cell_size
currentCell[1] = 0 - self.city_offset
row_index = 0
# Look through Y axis (rows)
while row_index < y_count:
# Assessing rows
newCell = Vector((curX, currentCell[1]+self.cell_size, 0))
currentCell = newCell
storeCell = random.randint(0,1)
if self.random_placement:
if storeCell == 1:
cellList.append(Vector((currentCell[0],currentCell[1],currentCell[2])))
else:
cellList.append(Vector((currentCell[0],currentCell[1],currentCell[2])))
row_index += 1
column_index+=1
# Begin creating | |
self.request.user.username
else:
this_holder = Teacher.objects.get(user_ptr_id=self.request.user.id)
module_holder = this_holder.module_holder
form = super(MarkDistributionCreate, self).get_form(**kwargs)
form.fields['class_name'].widget.attrs = {'class': 'basic-multiple'}
form.fields['subject_name'].widget.attrs = {'class': 'basic-multiple'}
form.fields['class_name'].queryset = Classes.objects.filter(module_holder=module_holder)
form.fields['subject_name'].queryset = Subject.objects.filter(module_holder=module_holder)
return form
def get_context_data(self, **kwargs):
context = super(MarkDistributionCreate, self).get_context_data(**kwargs)
if self.request.user.is_staff:
module_holder = self.request.user.username
else:
this_holder = Teacher.objects.get(user_ptr_id=self.request.user.id)
module_holder = this_holder.module_holder
context['markdist'] = MarkDistribution.objects.filter(module_holder=module_holder)
return context
class MarkDistributionUpdate(PermissionRequiredMixin,SuccessMessageMixin, LoginRequiredMixin ,UpdateView):
model = MarkDistribution
fields = ['class_name','subject_name','exam','attendance','class_test','assignment']
login_url = 'home:login'
context_object_name = 'markdist'
template_name = 'student/mark_distribution.html'
permission_required = 'student.change_markdistribution'
success_message = 'Mark Distribution has been updated!'
def form_valid(self, form):
if self.request.user.is_staff:
module_holder = self.request.user.username
else:
this_holder = Teacher.objects.get(user_ptr_id=self.request.user.id)
module_holder = this_holder.module_holder
form.instance.module_holder = module_holder
return super().form_valid(form)
def get_form(self, **kwargs):
if self.request.user.is_staff:
module_holder = self.request.user.username
else:
this_holder = Teacher.objects.get(user_ptr_id=self.request.user.id)
module_holder = this_holder.module_holder
form = super(MarkDistributionUpdate, self).get_form(**kwargs)
form.fields['class_name'].widget.attrs = {'class': 'basic-multiple'}
form.fields['subject_name'].widget.attrs = {'class': 'basic-multiple'}
form.fields['class_name'].queryset = Classes.objects.filter(module_holder=module_holder)
form.fields['subject_name'].queryset = Subject.objects.filter(module_holder=module_holder)
return form
def get_context_data(self, **kwargs):
context = super(MarkDistributionUpdate, self).get_context_data(**kwargs)
if self.request.user.is_staff:
module_holder = self.request.user.username
else:
this_holder = Teacher.objects.get(user_ptr_id=self.request.user.id)
module_holder = this_holder.module_holder
context['markdist'] = MarkDistribution.objects.filter(module_holder=module_holder)
return context
class MarkDistributionDelete(PermissionRequiredMixin, SuccessMessageMixin, LoginRequiredMixin, DeleteView):
login_url = 'home:login'
permission_required = 'student.delete_markdistribution'
success_message = 'Mark Distribution has been deleted!'
model = MarkDistribution
success_url = reverse_lazy('student:mark_distribution_create')
# ADMISSION CLASSES START HERE
class AdmissionCreate(PermissionRequiredMixin, SuccessMessageMixin, LoginRequiredMixin, CreateView):
login_url = 'home:login'
model = Admission
template_name = 'student/admission.html'
permission_required = 'student.add_admission'
    success_message = 'Admission has been created!'
form_class = AdmissionForm
# success_url = '/student/admission/view/'
def form_valid(self, form):
if self.request.user.is_staff:
module_holder = self.request.user.username
else:
this_holder = Teacher.objects.get(user_ptr_id=self.request.user.id)
module_holder = this_holder.module_holder
# password = <PASSWORD>(form.instance.password)
# form.instance.password = password
form.instance.module_holder = module_holder
return super().form_valid(form)
def form_invalid(self, form):
password = <PASSWORD>(form.instance.password)
form.instance.password = password
print('================1============', form.instance.password)
print('================2============', form.instance.password)
return super().form_invalid(form)
# def post(self, request, *args, **kwargs):
# return CreateView(request, *args, **kwargs)
# # form = self.get_form()
# # self.object = self.get_object()
# # if form.is_valid():
# # return self.form_valid(form)
# # else:
# # return self.form_invalid(form)
def get_form(self, **kwargs):
if self.request.user.is_staff:
module_holder = self.request.user.username
else:
this_holder = Teacher.objects.get(user_ptr_id=self.request.user.id)
module_holder = this_holder.module_holder
last_id = Admission.objects.filter(module_holder=module_holder).first()
# print('last ID==================',last_id.admission_registration)
if last_id is None:
last_id = '0'
else:
last_id = last_id.admission_registration
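        # generate_reg (defined elsewhere in this project) is assumed to derive the next
        # registration number from the previous one; '0' seeds the very first admission.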
form = super(AdmissionCreate, self).get_form(**kwargs)
# form.fields['gender'].widget = forms.RadioSelect()
form.fields['admission_registration'].widget = forms.TextInput(attrs={'value': generate_reg(last_id), 'readonly': 'readonly'})
# form.fields['free_student'].widget = forms.RadioSelect()
form.fields['admission_date'].widget = forms.TextInput(attrs = {'type': 'date','value':timezone.now().strftime('%Y-%m-%d')})
form.fields['date_of_birth'].widget = forms.TextInput(attrs = {'type': 'date'})
form.fields['cast'] = forms.CharField(required=False)
form.fields['photo'] = forms.ImageField(required=False)
form.fields['annual_fund'] = forms.CharField(required=False)
form.fields['admission_class'].queryset = Classes.objects.filter(module_holder=module_holder)
form.fields['admission_section'].queryset = Section.objects.filter(module_holder=module_holder)
form.fields['admission_class'].widget.attrs = {'class': 'basic-multiple'}
form.fields['admission_section'].widget.attrs = {'class': 'basic-multiple'}
# form.fields['photo'].widget = forms.TextInput(attrs={'type': 'file', 'class': 'custom-input-file custom-input-file--2'})
return form
def get_context_data(self, **kwargs):
if self.request.user.is_staff:
module_holder = self.request.user.username
else:
this_holder = Teacher.objects.get(user_ptr_id=self.request.user.id)
module_holder = this_holder.module_holder
context = super(AdmissionCreate, self).get_context_data(**kwargs)
context['classesform'] = ClassesForm(module_holder)
context['sectionform'] = SectionForm()
context['admissioncreateurl'] = '/student/admission/create/'
return context
class AdmissionUpdate(PermissionRequiredMixin, SuccessMessageMixin, LoginRequiredMixin, UpdateView):
login_url = 'home:login'
model = Admission
fields = admissionfields
template_name = 'student/admission.html'
permission_required = 'student.change_admission'
success_message = 'Student data has been updated!'
# success_url = '/student/admission/view/'
def form_valid(self, form):
if self.request.user.is_staff:
module_holder = self.request.user.username
else:
this_holder = Teacher.objects.get(user_ptr_id=self.request.user.id)
module_holder = this_holder.module_holder
form.instance.module_holder = module_holder
return super().form_valid(form)
    def get_form(self, **kwargs):
        # Resolve module_holder the same way form_valid() and get_context_data() do,
        # so the class/section choices match the records this user actually owns.
        if self.request.user.is_staff:
            module_holder = self.request.user.username
        else:
            this_holder = Teacher.objects.get(user_ptr_id=self.request.user.id)
            module_holder = this_holder.module_holder
        form = super(AdmissionUpdate, self).get_form(**kwargs)
        form.fields['gender'].widget = forms.RadioSelect()
        form.fields['admission_registration'].widget = forms.TextInput(attrs={'readonly': 'readonly'})
        form.fields['free_student'].widget = forms.RadioSelect()
        form.fields['admission_date'].widget = forms.TextInput(attrs={'type': 'date', 'value': timezone.now().strftime('%Y-%m-%d')})
        form.fields['date_of_birth'].widget = forms.TextInput(attrs={'class': 'date'})
        form.fields['cast'] = forms.CharField(required=False)
        form.fields['photo'] = forms.ImageField(required=False)
        form.fields['annual_fund'] = forms.CharField(required=False)
        form.fields['admission_class'].queryset = Classes.objects.filter(module_holder=module_holder)
        form.fields['admission_section'].queryset = Section.objects.filter(module_holder=module_holder)
        form.fields['admission_class'].widget.attrs = {'class': 'basic-multiple'}
        form.fields['admission_section'].widget.attrs = {'class': 'basic-multiple'}
        return form
def get_context_data(self, **kwargs):
if self.request.user.is_staff:
module_holder = self.request.user.username
else:
this_holder = Teacher.objects.get(user_ptr_id=self.request.user.id)
module_holder = this_holder.module_holder
context = super(AdmissionUpdate, self).get_context_data(**kwargs)
context['classesform'] = ClassesForm(module_holder)
context['sectionform'] = SectionForm()
context['admissioncreateurl'] = '/student/student/admission/view/'+str(self.kwargs.get('pk'))+'/update'
return context
class AdmissionDetail(PermissionRequiredMixin, LoginRequiredMixin, DetailView):
login_url = 'home:login'
model = Admission
template_name = 'student/admission_detail.html'
context_object_name = 'admisssion_details'
permission_required = 'student.view_admission'
def get_context_data(self, **kwargs):
context = super(AdmissionDetail, self).get_context_data(**kwargs)
return context
class AdmissionView(PermissionRequiredMixin, LoginRequiredMixin, ListView):
login_url = 'home:login'
model = Admission
template_name = 'student/admission_view.html'
permission_required = 'student.view_admission'
def get_context_data(self, **kwargs):
context = super(AdmissionView, self).get_context_data(**kwargs)
if self.request.user.is_staff:
module_holder = self.request.user.username
else:
this_holder = Teacher.objects.get(user_ptr_id=self.request.user.id)
module_holder = this_holder.module_holder
context['admission_view'] = Admission.objects.filter(module_holder=module_holder)
return context
class AdmissionDelete(PermissionRequiredMixin, LoginRequiredMixin, DeleteView):
login_url = 'home:login'
permission_required = 'student.delete_admission'
model = Admission
success_url = reverse_lazy('student:admission_view')
# INDIVIDUAL STUDENT
class IndividualMarksView(PermissionRequiredMixin, LoginRequiredMixin, DetailView):
login_url = 'home:login'
permission_required = 'student.view_markdistribution'
days = [
'Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday',
'Saturday',
]
    slug_url_kwarg = 'year_slug'
fee_year = timezone.now().strftime('%Y')
tab = False
# def get_object(self, queryset=None):
# return queryset.get(slug=self.slug)
def get(self, request, *args, **kwargs):
# admission = get_object_or_404(Admission, pk=kwargs['pk'])
# global fee_year
print(request.GET)
if request.GET.get('year') and request.GET.get('tab'):
self.fee_year = request.GET.get('year')
self.tab = True
student_id = kwargs['student_name']
admission = Admission.objects.get(pk=kwargs['student_name'])
routines = []
if self.request.user.is_staff:
module_holder = self.request.user.username
else:
this_holder = Teacher.objects.get(user_ptr_id=self.request.user.id)
module_holder = this_holder.module_holder
for day in self.days:
matched_routine = Routine.objects.filter(module_holder=module_holder, class_name=admission.admission_class, school_day__contains=[day])
routines.append(
{
'days':day,
'routine': matched_routine
}
)
student_attendance_detail_year = []
monthly_detal = []
daily_detail = []
total_holidays = 0
total_leave = 0
total_present = 0
total_late = 0
total_absent = 0
total_off = 0
for year in range(start_year, int(current_year)+1):
# monthly_detal.clear()
for month in all_months:
# daily_detail.clear()
for day in range(1, monthrange(year,int(month))[1]+1):
if day < 10:
day = '0'+str(day)
date = str(year)+'-'+str(month)+'-'+str(day)
date = datetime.datetime.strptime(date,'%Y-%m-%d')
daily = Attendance.objects.filter(student_id=admission, date=date)
daily_inner = []
                    if daily.exists():
                        for detail in daily:
                            # resolve the attendance code once per record instead of
                            # re-calling the converter for every comparison
                            status = attendance_converter(detail.attendance_selection, date)
                            if status == 'P':
                                total_present += 1
                            elif status == 'A':
                                total_absent += 1
                            elif status == 'O':
                                total_off += 1
                            elif status == 'H':
                                total_holidays += 1
                            elif status == 'L':
                                total_leave += 1
                            elif status == 'L-I':
                                total_late += 1
                            daily_inner.append({
                                'day': day,
                                'month': convert_month_into_string(month),
                                'attendance': status,
                                'remark': detail.remarks,
                                'att_date': detail.date
                            })
                    else:
                        # no attendance record for this date: let the converter decide the default code
                        status = attendance_converter('', date)
                        if status == 'P':
                            total_present += 1
                        elif status == 'A':
                            total_absent += 1
                        elif status == 'O':
                            total_off += 1
                        elif status == 'H':
                            total_holidays += 1
                        elif status == 'L':
                            total_leave += 1
                        elif status == 'L-I':
                            total_late += 1
                        daily_inner.append({
                            'day': day,
                            'month': convert_month_into_string(month),
                            'attendance': status,
                            'remark': 'N/A',
                            'att_date': 'N/A'
                        })
# daily_detail.append(daily_inner)
monthly_detal.append(
{'daily': daily_inner}
)
student_attendance_detail_year.append({
'year': year,
'month': monthly_detal
})
# routine.append()
collect_std_marks_subs = []
student_object = Admission.objects.filter(pk=student_id).first()
class_of_student = Classes.objects.filter(class_name=student_object.admission_class.class_name, module_holder=module_holder).first()
subjects = Subject.objects.filter(class_name__contains=class_of_student.pk, module_holder=module_holder)
get_calculated_marks = []
for mos in subjects:
out_of_marks = MarkDistribution.objects.filter(subject_name=mos.pk).first()
in_of_marks = StudentMark.objects.filter(class_name=class_of_student.pk, subject=mos.pk, student_name=student_id,module_holder=module_holder).first()
# marks_of_student = StudentMark.objects.filter(student_name=student_id, module_holder=module_holder)
exam = 0
attendance = 0
assignment = 0
class_test = 0
calculated_marks_object = CalculateResults.objects.filter(student=admission, module_holder=module_holder, subject=mos.pk)
for calc in calculated_marks_object:
get_calculated_marks.append({
'exam_type': calc.exam_type,
'total_marks': calc.total_marks,
'assigned_marks': calc.marks,
'subject': calc.subject
})
if out_of_marks:
in_of_marks = StudentMark.objects.filter(class_name=class_of_student.pk, subject=mos.pk, student_name=student_id,module_holder=module_holder).first()
if in_of_marks:
exam = in_of_marks.exam
attendance = in_of_marks.attendance
assignment = in_of_marks.assignment
class_test = in_of_marks.class_test
collect_std_marks_subs.append({
'subject_name': mos.subject_name,
'examout': out_of_marks.exam,
'attendanceout': out_of_marks.attendance,
'assignmentout': out_of_marks.assignment,
'classtestout': out_of_marks.class_test,
'examin': exam,
'attendancin': attendance,
'assignmentin': assignment,
'classtestin': class_test
})
else:
collect_std_marks_subs.append({
'subject_name': mos.subject_name,
'examout': 0,
'attendanceout': 0,
'assignmentout': 0,
'classtestout': 0,
'examin': exam,
'attendancin': attendance,
'assignmentin': assignment,
'classtestin': class_test
})
# print(collect_std_marks_subs)
# print('mamamamamam',subjects)
# START WORKING WITH STUDENT PAYMENT SYSTEM
class_object = Classes.objects.filter(class_name__contains=admission.admission_class.class_name).first()
voucher_object = Voucher.objects.filter(student_name=student_id)
payment_system = []
for voucher in voucher_object:
payment_system.append({
'fee_month': voucher.fee_month,
'tution_fee_payable': voucher.monthly_tution_fee,
'tution_fee_paid': voucher.monthly_tution_fee_paid
})
payment_system_info = {
'admission_date': admission.admission_date,
'monthly_fee': class_object.fee,
}
context = {
'collect_std_marks_subs': collect_std_marks_subs,
'routines': routines,
'admission': admission,
'student_attendance_detail_year':student_attendance_detail_year,
'total_present': total_present,
'total_absent': total_absent,
'total_off': total_off,
'total_holidays': total_holidays,
'total_leave': total_leave,
'total_late': total_late,
'payment_system_info': calculate_student_fee_for_year(request, student_id, admission, self.fee_year),
'get_calculated_marks': get_calculated_marks,
'tab': self.tab,
'range': range(2015,2030),
}
return render(request, 'attendance/individual_marks_view.html', context)
def calculate_student_fee_for_year(request, *args, **kwargs):
class_object = Classes.objects.filter(class_name__contains=args[1].admission_class.class_name).first()
admission_month = int(str(args[1].admission_date).split('-')[1])
payment_system = []
total_payable = 0
total_paid = 0
for month in range(admission_month, 13):
voucher_object = Voucher.objects.filter(student_name=args[0], month=add_zero_to_month(month), year=args[2])
total_payable = total_payable+args[1].monthly_tution_fee
# total_paid = total_paid+total_payable
if voucher_object:
for voucher in voucher_object:
# total_payable = total_payable+voucher.monthly_tution_fee
if voucher.monthly_tution_fee != args[1].monthly_tution_fee:
detected = args[1].monthly_tution_fee-voucher.monthly_tution_fee
total_payable = total_payable-detected
total_paid = total_paid+voucher.monthly_tution_fee_paid
payment_system.append({
'status': 'Voucher Generated',
'fee_month': str(args[2])+'-'+convert_month_2_string(int(str(voucher.fee_month).split('-')[1])),
'tution_fee_payable': voucher.monthly_tution_fee,
'tution_fee_paid': voucher.monthly_tution_fee_paid,
'total_payable': total_payable,
'total_paid': total_paid
})
else:
payment_system.append({
'status': 'Voucher Not Generated',
'fee_month': str(args[2])+'-'+convert_month_2_string(month),
'tution_fee_payable': args[1].monthly_tution_fee,
'tution_fee_paid': 0,
'total_payable': total_payable,
'total_paid': | |
#
# Copyright (c) 2015, <NAME> <<EMAIL>>
# Pypsi - https://github.com/ameily/pypsi
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
'''
Provides functions and classes for dealing with command line input and output.
'''
from pypsi.ansi import ansi_ljust, ansi_rjust, ansi_center, ansi_len
def get_lines(txt):
    '''
    Break text into individual lines.

    :returns generator: yields ``(line, eol)`` tuples, where ``eol`` is True when the line
        was terminated by a newline character in the source text.
    '''
if not txt:
return
start = 0
try:
while True:
i = txt.index('\n', start)
yield (txt[start:i], True)
start = i + 1
if start >= len(txt):
break
    except ValueError:
        # no further newline found: yield the remaining text and flag it as unterminated
        yield (txt[start:], False)
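# Illustrative usage sketch (added for clarity, not part of the original pypsi module):
# get_lines() is a generator, so it is consumed in a loop or with list(); the boolean in
# each tuple reports whether that chunk ended with a newline in the source text.
def _demo_get_lines():
    return list(get_lines('first\nsecond'))
    # expected: [('first', True), ('second', False)]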
def wrap_line(txt, width, wrap_prefix=None):
    '''
    Word wrap a single line.

    :param str txt: the line to wrap
    :param int width: the maximum width of a wrapped line
    :param str wrap_prefix: optional prefix prepended to every wrapped line after the first
    :returns generator: yields the wrapped lines one at a time
    '''
if width is None or width <= 0:
yield txt
else:
wrap_prefix = wrap_prefix or ''
start = 0
count = 0
i = 0
total = len(txt)
first_line = True
while i < total:
esc_code = False
prev = None
while count <= width and i < total:
c = txt[i]
if c == '\x1b':
esc_code = True
elif esc_code:
if c in 'ABCDEFGHJKSTfmnsulh':
esc_code = False
else:
count += 1
if c in ' \t':
prev = i
i += 1
if i >= total:
prev = i
else:
prev = prev or i
if wrap_prefix and first_line:
first_line = False
width -= len(wrap_prefix)
yield txt[start:prev]
else:
yield wrap_prefix + txt[start:prev]
start = prev
while start < total and txt[start] in '\t ':
start += 1
i = start
count = 0
if count:
if not first_line:
yield wrap_prefix + txt[start:]
else:
yield txt[start:]
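# Illustrative usage sketch (added for clarity, not part of the original pypsi module):
# wrap an 11-character line at width 7; wrap_prefix is only prepended to lines after the
# first, which is how continuation lines are indented.
def _demo_wrap_line():
    return list(wrap_line('aaa bbb ccc', 7, wrap_prefix='  '))
    # expected: ['aaa bbb', '  ccc']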
def highlight(target, term, color='1;32'):
'''
Find and highlight a term inside of a block of text.
:param str target: the text to search in
:param str term: the term to search for
:param str color: the color to output when the term is found
:returns str: the input string with all occurrences of the term highlighted
'''
if not color:
return target
s = target.lower()
t = term.lower()
start = 0
end = s.find(t)
ret = ''
while end >= 0:
ret += target[start:end]
ret += '\x1b[{color}m{term}\x1b[0m'.format(
color=color, term=target[end:end + len(term)]
)
start = end + len(term)
end = s.find(t, start)
ret += target[start:]
return ret
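# Illustrative usage sketch (added for clarity, not part of the original pypsi module):
# every case-insensitive occurrence of the term is wrapped in the ANSI colour sequence,
# the default bright green '1;32', while the original casing of the text is preserved.
def _demo_highlight():
    return highlight('Error: parse error', 'error')
    # expected: '\x1b[1;32mError\x1b[0m: parse \x1b[1;32merror\x1b[0m'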
def file_size_str(value):
'''
Get the human readable file size string from a number. This will convert a
value of 1024 to 1Kb and 1048576 to 1Mb.
:param int value: the value to convert
:returns str: the human readable file size string
'''
value = float(value)
units = ['Kb', 'Mb', 'Gb', 'Tb']
unit = 'B'
for u in units:
v = value / 1024.0
if v < 1.0:
break
value = v
unit = u
if unit == 'B':
return "{} B".format(int(value))
return "{:.2f} {}".format(value, unit)
def _format_value(stream, type, value):
'''
Color format a value as {type}( {value} ).
:param AnsiStream stream: ANSI stream
:param str type: type string
:param any value: value to format
'''
tmpl = "{blue}{type}({reset} {value} {blue}){reset}"
if stream:
return stream.ansi_format(tmpl, type=type, value=value)
return "{type}( {value} )".format(type=type, value=value)
def obj_str(obj, max_children=3, stream=None):
'''
Pretty format an object with colored type information. Examples:
- `list`: ``list( item1, item2, item3, ...)``
- `bool`: ``bool( True )``
- `None`: ``<null>``
:param object obj: object to format
:param int max_children: maximum number of children to print for lists
:param file stream: target stream, used to determine if color will be used.
:returns str: the formatted object
'''
if isinstance(obj, bool):
return _format_value(stream, "bool", obj)
if isinstance(obj, int):
return _format_value(stream, "int", "{:d}".format(obj))
if isinstance(obj, float):
return _format_value(stream, "float", "{:g}".format(obj))
if isinstance(obj, (list, tuple)):
if len(obj) > max_children > 0:
obj = obj[:max_children]
obj.append('...')
return _format_value(
stream, "list",
', '.join([
obj_str(child, max_children=max_children, stream=stream)
for child in obj
])
)
if obj is None:
if stream:
return stream.ansi_format("{blue}<null>{reset}")
return "<null>"
if isinstance(obj, str):
return obj
return str(obj)
def title_str(title, width=80, align='left', hr='=', box=False):
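    '''
    Format a title line, optionally inside a +---+ box.

    :param str title: the title text
    :param int width: total output width
    :param str align: 'left', 'center', or 'right'
    :param str hr: character repeated under the title when ``box`` is False
    :param bool box: draw a box around the title instead of underlining it
    :returns str: the formatted, newline-joined title block
    '''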
lines = []
if box:
border = '+' + ('-' * (width - 2)) + '+'
t = None
if align == 'left':
t = ansi_ljust(title, width - 4)
elif align == 'center':
t = ansi_center(title, width - 4)
else:
t = ansi_rjust(title, width - 4)
lines.append(border)
lines.append('| ' + t + ' |')
lines.append(border)
else:
if align == 'left':
lines.append(title)
elif align == 'center':
lines.append(ansi_center(title, width))
elif align == 'right':
lines.append(ansi_rjust(title, width))
lines.append(hr * width)
return '\n'.join(lines)
class Table(object):
'''
Variable width table.
'''
def __init__(self, columns, width=80, spacing=1, header=True):
'''
:param multiple columns: a list of either class:`Column` instances or a
single `int` defining how many columns to create
:param int width: the maximum width of the entire table
:param int spacing: the amount of space characters to display between
columns
:param bool header: whether to display header row with the column names
'''
if isinstance(columns, int):
self.columns = [Column()] * columns
header = False
else:
self.columns = columns
self.width = width
self.spacing = spacing
self.header = header
self.rows = []
def append(self, *args):
'''
Add a row to the table.
:param list args: the column values
'''
self.rows.append(args)
for (col, value) in zip(self.columns, args):
col.width = max(col.width, ansi_len(str(value)))
return self
def extend(self, *args):
'''
Add multiple rows to the table, each argument should be a list of
column values.
'''
for row in args:
self.append(*row)
return self
def write(self, fp):
'''
Print the table to a specified file stream.
:param file fp: output stream
'''
def write_overflow(row):
overflow = [''] * len(self.columns)
column_idx = 0
for (col, value) in zip(self.columns, row):
if column_idx > 0:
fp.write(' ' * self.spacing)
                if not isinstance(value, str):
                    value = str(value)
if ansi_len(value) <= col.width:
fp.write(ansi_ljust(value, col.width))
else:
wrapped_line = list(wrap_line(value, col.width))
if len(wrapped_line) > 1:
overflow[column_idx] = ' '.join(wrapped_line[1:])
fp.write(wrapped_line[0])
# Move to next column
column_idx += 1
fp.write('\n')
# deal with overflowed data
if ''.join(overflow):
write_overflow(overflow)
total = sum([col.width for col in self.columns])
# Resize columns if last too wide
# TODO: Smarter column resizing, maybe pick widest column
if (total + self.spacing * (len(self.columns) - 1)) > self.width:
self.columns[-1].mode = Column.Grow
for col in self.columns:
if col.mode == Column.Grow:
remaining = (
self.width - ((len(self.columns) - 1) * self.spacing) -
total
)
col.width += remaining
if self.header:
i = 0
for col in self.columns:
if i > 0:
fp.write(' ' * self.spacing)
fp.write(ansi_ljust(col.text, col.width))
i += 1
fp.write('\n')
fp.write('=' * self.width)
fp.write('\n')
for row in self.rows:
write_overflow(row)
return 0
class Column(object):
'''
A table column.
'''
#: Size mode to have the column shrink to its contents
Shrink = 0
#: Size mode to have the column grow to the maximum width it can have
Grow = 1
def __init__(self, text='', mode=0):
'''
:param str text: the column name
:param int mode: the column size mode
'''
self.text = text
self.mode = mode
self.width = ansi_len(text)
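# Illustrative usage sketch (added for clarity, not part of the original pypsi module):
# build a two-column table where the second column grows to fill the remaining width,
# then render it to stdout. Values wider than their column are word wrapped on output.
def _demo_table():
    import sys
    table = Table(columns=[Column('Name'), Column('Description', Column.Grow)],
                  width=60, spacing=2)
    table.append('pypsi', 'Python Pluggable Shell Interface')
    table.append('table', 'Variable width, word-wrapping table rendering')
    table.write(sys.stdout)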
class FixedColumnTable(object):
'''
A table that has preset column widths.
'''
def __init__(self, widths):
'''
:param list widths: the list of column widths (`int`)
'''
self.widths = [int(width) for width in widths]
self.buffer = []
def write_row(self, fp, *args):
'''
Print a single row.
:param file fp: the output file stream (usually sys.stdout or
sys.stderr)
:param list args: the column values for the row
| |
isinstance(validations, dict):
            raise TypeError('validations is required to be a dict. type: {0} was passed.'.format(type(validations)))
if not isinstance(property_name, six.string_types):
raise TypeError("property_key must be a string. type: {0} was passed.".format(type(property_name)))
# reorder validates putting required first. If the data doesn't exist there is no need to continue.
order = ['type', 'required'] + [key for key in validations.keys() if key not in ('required', 'type')]
ordered_validations = OrderedDict(sorted(validations.items(), key=lambda x: order.index(x[0])))
for validation, value in six.iteritems(ordered_validations):
if validation in VALIDATORS:
if validation == 'not':
# TODO: need to test to make sure this works
for err in self.__execute_validations(value, data, property_name, ancestors, negation, prefix):
yield err
continue
for err in getattr(self, '_{0}'.format(validation))(value, data, property_name, ancestors, negation, prefix):
yield err
else:
raise LookupError("{0} isn't a validator or reserved scheme key.".format(validation))
def _type(self, expected, value, key, ancestors, negation, prefix):
error = None
formatter = 'config-success'
if isinstance(expected, tuple):
data_type = []
for dt in expected:
if isinstance(dt, tuple):
data_type.extend([element for element in dt])
else:
data_type.append(dt)
data_type = ' or '.join([dt.__name__ for dt in data_type])
else:
data_type = expected.__name__
# TODO: update not logic.
if negation:
if isinstance(value, expected):
error = ConfigValidationException(ancestors, key, value, 'type', data_type)
formatter = 'config-failure'
elif not isinstance(value, expected):
error = ConfigValidationException(ancestors, key, value, 'type', data_type)
formatter = 'config-failure'
logger.info(
self.__build_validation_message(ancestors, key, 'type', data_type),
extra={'formatter': formatter, 'prefix': prefix}
)
yield error
def _required(self, expected, value, key, ancestors, negation, prefix):
error = None
if not isinstance(expected, (list, tuple)):
            raise TypeError('included is required to be a list or tuple. type: {0} was passed.'.format(type(expected)))
matches = []
for expected_key in expected:
if not isinstance(expected_key, six.string_types):
                raise TypeError('each value in the included list must be a string. type: {0} was passed.'.format(type(expected_key)))
if expected_key in value and value.get(expected_key) is not None:
logger.info(
self.__build_validation_message(ancestors, key, 'required', expected_key),
extra={'formatter': 'config-success', 'prefix': prefix}
)
else:
logger.info(
self.__build_validation_message(ancestors, key, 'required', expected_key),
extra={'formatter': 'config-failure', 'prefix': prefix}
)
matches.append(expected_key)
if matches:
error = ConfigValidationException(ancestors, key, value, 'required', matches)
yield error
def _items(self, expected, values, key, ancestors, negation, prefix):
error = None
items_prefix = ' \u21B3'
item_identifier = ' \u2605'
if prefix:
items_prefix = items_prefix.rjust(4)
item_identifier = item_identifier.rjust(4)
if isinstance(values, (list, tuple)):
# validate each value.
for i, value in enumerate(values):
logger.info(
self.__build_validation_message(ancestors, key, 'item[{0}]'.format(i), value),
extra={'formatter': 'config-message', 'prefix': item_identifier}
)
try:
self._walk_tree(value, expected, ancestors, key, prefix=items_prefix)
except ConfigValidationException as e:
error = e
else:
raise TypeError('Can\'t validate items if a list or tuple isn\'t passed.')
yield error
def _max(self, expected, value, key, ancestors, negation, prefix):
"""
:param expected:
:param value:
:param key:
:param ancestors:
:param negation:
:return:
"""
error = None
formatter = 'config-success'
length = len(value) if value is not None else expected
if length > expected:
formatter = 'config-failure'
error = ConfigValidationException(ancestors, key, value, 'max', expected)
logger.info(
self.__build_validation_message(ancestors, key, 'max', expected),
extra={'formatter': formatter, 'prefix': prefix}
)
yield error
def _one_of(self, expected, values, key, ancestors, negation, prefix):
error = None
valid = 0
one_of_prefix = ' \u21B3'
if prefix:
one_of_prefix = one_of_prefix.rjust(4)
if '~' in expected:
expected = self._reference_keys(expected)
logger.info(" \u2605 {0}".format(self.__build_validation_message(ancestors, key, 'one_of', '\u2605')))
for i, expected_value in enumerate(expected):
logger.info(" \u2605 {0}".format(
self.__build_validation_message(ancestors, key, 'one_of', 'item[{0}]'.format(i)))
)
if isinstance(expected_value, dict):
try:
self._walk_tree(values, expected_value, ancestors.copy(), key, prefix=one_of_prefix)
valid += 1
error = None
except ConfigValidationException as e:
if not valid and error is None:
error = ConfigOneOfException(ancestors, key, values, 'one_of', None)
error.additional_messages.append(e.message)
pass
except Exception:
pass
else:
if self.__one_of_validation(values, expected_value):
valid += 1
error = None
formatter = 'config-success'
else:
formatter = 'config-failure'
if not valid:
error = ConfigValidationException(ancestors, key, values, 'one_of', expected)
logger.info(
self.__build_validation_message(ancestors, key, 'one_of', expected_value),
extra={'formatter': formatter, 'prefix': one_of_prefix}
)
yield error
def __one_of_validation(self, values, current_expected_value):
if isinstance(values, (dict, list, tuple)):
return values and current_expected_value in values
else:
return values and current_expected_value == values
def _scheme_propagation(self, scheme, definitions):
""" Will updated a scheme based on inheritance. This is defined in a scheme objects with ``'inherit': '$definition'``.
Will also updated parent objects for nested inheritance.
Usage::
>>> SCHEME = {
>>> 'thing1': {
>>> 'inherit': '$thing2'
>>> },
>>> '_': {
>>> 'thing2': {
>>> 'this_is': 'thing2 is a definition'
>>> }
>>> }
>>> }
>>> scheme = SCHEME.get('thing1')
>>> if 'inherit' in scheme:
>>> scheme = self._scheme_propagation(scheme, SCHEME.get('_'))
>>>
>>> scheme.get('some_data')
:param scheme: A dict, should be a scheme defining validation.
:param definitions: A dict, should be defined in the scheme using '_'.
:rtype: A :dict: will return a updated copy of the scheme.
"""
if not isinstance(scheme, dict):
raise TypeError('scheme must be a dict to propagate.')
inherit_from = scheme.get('inherit')
if isinstance(inherit_from, six.string_types):
if not inherit_from.startswith('$'):
raise AttributeError('When inheriting from an object it must start with a $.')
if inherit_from.count('$') > 1:
raise AttributeError('When inheriting an object it can only have one $.')
if not isinstance(definitions, dict):
raise AttributeError("Must define definitions in the root of the SCHEME. "
"It is done so with '_': { objs }.")
name = inherit_from[1:]
definition = definitions.copy().get(name)
if not definition:
raise LookupError(
'Was unable to find {0} in definitions. The follow are available: {1}.'.format(name, definitions)
)
else:
raise AttributeError('inherit must be defined in your scheme and be a string value. format: $variable.')
updated_scheme = {key: value for key, value in six.iteritems(scheme) if key not in definition}
nested_scheme = None
for key, value in six.iteritems(definition):
if key in scheme:
updated_scheme[key] = scheme[key]
else:
updated_scheme[key] = value
if key == 'inherit':
nested_scheme = self._scheme_propagation(definition, definitions)
# remove inherit key
if 'inherit' in updated_scheme:
del updated_scheme['inherit']
if nested_scheme is not None:
updated_scheme.update(nested_scheme)
return updated_scheme
def __build_validation_message(self, ancestors, property_name, validation, expected_value):
msg = " \u27A4 ".join(ancestors.keys())
if isinstance(expected_value, dict):
expected_value = expected_value.keys()
if property_name == list(ancestors)[-1]:
msg = "{0} \u27A4 {1}: {2}".format(msg, validation, expected_value)
else:
msg = '{0} \u27A4 {1} \u27A4 {2}: {3}'.format(msg, property_name, validation, expected_value)
return msg
##
# Config Data Type Classes
##
class ConfigNode(object):
def __init__(self, start_mark, end_mark):
self._alias = None
self._name = None
self._start_mark = start_mark
self._end_mark = end_mark
@property
def alias(self):
return self._alias
@alias.setter
def alias(self, value):
if value is not None and not isinstance(value, six.string_types):
raise TypeError('name must be a string.')
self._alias = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if value is not None and not isinstance(value, six.string_types):
raise TypeError('name must be a string.')
self._name = value
@property
def start_mark(self):
return self._start_mark
@property
def end_mark(self):
return self._end_mark
class ConfigDict(dict, ConfigNode):
def __init__(self, node_data, start_mark=None, end_mark=None):
super(ConfigDict, self).__init__(node_data)
ConfigNode.__init__(self, start_mark, end_mark)
def __contains__(self, item):
try:
return super(ConfigDict, self).__contains__(item) or hasattr(self, item)
except:
return False
def __delattr__(self, item):
try:
object.__getattribute__(self, item)
except AttributeError:
try:
del self[item]
except KeyError:
raise AttributeError(item)
else:
object.__delattr__(self, item)
def __getattr__(self, item):
"""
"""
try:
return object.__getattribute__(self, item)
except:
try:
return self[item]
except:
raise AttributeError(item)
def __setattr__(self, key, value):
try:
object.__getattribute__(self, key)
except AttributeError:
try:
# allow for specific properties to be set on the base class and not be part of the dict.
if key.startswith('_') and key[1:] in dir(self):
object.__setattr__(self, key, value)
else:
self[key] = value
except:
raise AttributeError(key)
else:
object.__setattr__(self, key, value)
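# Illustrative usage sketch (added for clarity, not part of the original module):
# ConfigDict behaves like a plain dict but also exposes its keys as attributes, which is
# how parsed configuration nodes are accessed elsewhere in this module.
def _demo_config_dict():
    node = ConfigDict({'host': 'localhost', 'port': 8080})
    assert node.host == node['host'] == 'localhost'
    node.port = 9090               # attribute writes fall through to the dict
    assert node['port'] == 9090 and 'host' in node
    return node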
class ConfigSeq(list, ConfigNode):
def __init__(self, node_data, start_mark=None, end_mark=None):
list.__init__(self, node_data)
ConfigNode.__init__(self, start_mark, end_mark)
class ConfigUnicode(str, ConfigNode):
def __init__(self, node_data, start_mark=None, end_mark=None):
ConfigNode.__init__(self, start_mark, end_mark)
def __new__(cls, node_data, start_mark=None, end_mark=None):
obj = super(ConfigUnicode, cls).__new__(cls, node_data)
return obj
class ConfigInt(int, ConfigNode):
def __init__(self, node_data, start_mark=None, end_mark=None):
ConfigNode.__init__(self, start_mark, end_mark)
def __new__(cls, node_data, start_mark=None, end_mark=None):
return super(ConfigInt, cls).__new__(cls, node_data)
class ConfigFloat(float, ConfigNode):
def __init__(self, node_data, start_mark=None, end_mark=None):
ConfigNode.__init__(self, start_mark, end_mark)
def __new__(cls, node_data, start_mark=None, end_mark=None):
try:
return float.__new__(cls, node_data)
except TypeError:
raise TypeError(node_data)
except ValueError:
raise ValueError(node_data)
##
# exception classes
##
class ConfigValidationException(Exception):
def __init__(self, ancestors, property_name, data, validation_type, validation_value):
self.ancestors = ancestors
self.data = data
self.property_name = property_name
self.validation_type = validation_type
self.validation_value = validation_value
self.parent_key = list(self.ancestors)[-1]
self.parent_value = ancestors.get(self.parent_key)
self._property_location = None
self._potential_fixes = []
if isinstance(data, ConfigNode):
self._property_location = 'line: {0} column: {1}.'.format(data.start_mark.line, data.start_mark.column)
super(ConfigValidationException, self).__init__(self.message)
@property
def message(self):
msg = '{0} failed validation: {1}.'.format(self.property_name, self.validation_type)
if self._property_location:
msg = '{0} {1}'.format(msg, self._property_location)
fixes = self.potential_fixes()
if fixes:
msg = '{0} {1}'.format(msg, fixes)
return msg
def log_error(self):
logger.error(self.message)
def | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 6 10:49:07 2019
@author: chrisbartel
"""
import os
from mlstabilitytest.stability.CompositionAnalysis import CompositionAnalysis
from mlstabilitytest.stability.HullAnalysis import AnalyzeHull
from mlstabilitytest.mp_data.data import Ef, mp_LiMnTMO, smact_LiMnTMO, hullin, hullout, spaces
from mlstabilitytest.stability.utils import read_json, write_json
import multiprocessing as multip
from time import time
from sklearn.metrics import confusion_matrix, r2_score
import numpy as np
import random
def _update_hullin_space(ml, mp_hullin, space):
"""
replace MP data for chemical space with ML data
Args:
ml (dict) - {formula (str) : {'Ef' : ML-predicted formation energy per atom (float)}}
mp_hullin (dict) - mlstabilitytest.data.hullin
space (str) - chemical space to update (format is '_'.join(sorted([element (str) for element in chemical space])))
Returns:
input data for analysis of one chemical space updated with ML-predicted data
"""
ml_space = mp_hullin[space]
for compound in ml_space:
if (CompositionAnalysis(compound).num_els_in_formula == 1) or (compound not in ml):
continue
else:
ml_space[compound]['E'] = ml[compound]
return ml_space
def _assess_stability(hullin, spaces, compound):
"""
determine the stability of a given compound by hull analysis
Args:
hullin (dict) - {space (str) : {formula (str) : {'E' : formation energy per atom (float),
'amts' :
{element (str) :
fractional amount of element in formula (float)}}}}
spaces (dict) - {formula (str) : smallest convex hull space that includes formula (str)}
compound (str) - formula to determine stability for
Returns:
{'stability' : True if on the hull, else False,
'Ef' : formation energy per atom (float),
'Ed' : decomposition energy per atom (float),
'rxn' : decomposition reaction (str)}
"""
return AnalyzeHull(hullin, spaces[compound]).cmpd_hull_output_data(compound)
def _get_smact_hull_space(compound, mp_spaces):
"""
Args:
compound (str) - compound to retrieve phase space for
mp_spaces (list) - list of phase spaces in MP (str, '_'.join(elements))
Returns:
relevant chemical space (str) to determine stability compound
"""
els = set(CompositionAnalysis(compound).els)
for s in mp_spaces:
space_els = set(s.split('_'))
if (len(els.intersection(space_els)) == 4) and (len(space_els) < 7):
return s
def _update_smact_space(ml, mp_hullin, space):
"""
Args:
ml (dict) - {compound (str) - formation energy per atom (float)}
mp_hullin (dict) - hull input file for all of MP
space (str) - chemical space to update
Returns:
replaces MP formation energy with ML formation energies in chemical space
"""
ml_space = mp_hullin[space]
for compound in ml_space:
if (CompositionAnalysis(compound).num_els_in_formula == 1) or (compound not in ml):
continue
else:
ml_space[compound]['E'] = ml[compound]
for compound in ml:
if set(CompositionAnalysis(compound).els).issubset(set(space.split('_'))):
ml_space[compound] = {'E' : ml[compound],
'amts' : {el : CompositionAnalysis(compound).amt_of_el(el)
for el in space.split('_')}}
return ml_space
def _get_stable_compounds(hullin, space):
"""
Args:
hullin (dict) - hull input data
space (str) - chemical space
Returns:
list of all stable compounds (str) in chemical space
"""
return AnalyzeHull(hullin, space).stable_compounds
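# Illustrative end-to-end sketch (added for clarity; the directory and file name below are
# placeholders, not part of the original module): StabilityAnalysis consumes a JSON file of
# {formula: predicted formation energy per atom} and re-runs the MP hull analysis with it.
def _example_stability_run():
    analysis = StabilityAnalysis(data_dir='/tmp/ml_run',     # hypothetical output directory
                                 data_file='ml_Ef.json',     # hypothetical ML prediction file
                                 experiment='LiMnTMO',
                                 nprocs=4)
    analysis.ml_hullin(remake=True)        # hull input with ML energies substituted in
    analysis.ml_hullout(remake=True)       # per-compound stability from hull analysis
    return analysis.results(remake=True)   # summary statistics and per-compound data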
class StabilityAnalysis(object):
"""
Perform stability analysis over all of Materials Project using ML-predicted formation energies
"""
def __init__(self,
data_dir,
data_file,
experiment='allMP',
nprocs='all'):
"""
converts input data to convenient format
Args:
data_dir (os.PathLike) - place where input ML data lives and to generate output data
data_file (str) - .json file with input ML data of form {formula (str) : formation energy per atom (float)}
experiment (str) - 'all' for all MP compounds or 'LiMnTMO' for Li-Mn-TM-O (TM in ['V', 'Cr', 'Fe', 'Co', 'Ni', 'Cu'])
nprocs (str or int) - number of processors to parallelize analysis over ('all' --> all available processors)
"""
start = time()
print('\nChecking input data...')
if not os.path.exists(data_dir):
os.mkdir(data_dir)
finput = os.path.join(data_dir, data_file)
input_data = read_json(finput)
input_data = {CompositionAnalysis(k).std_formula() : float(input_data[k])
for k in input_data}
if experiment == 'allMP':
mp = Ef()
compounds = list(mp.keys())
elif experiment == 'LiMnTMO':
mp = mp_LiMnTMO()
compounds = list(mp.keys())
elif experiment == 'smact':
mp = mp_LiMnTMO()
smact = smact_LiMnTMO()
compounds = list(set(list(mp.keys()) + smact['smact']))
elif 'random' in experiment:
mp = Ef()
compounds = list(mp.keys())
else:
raise NotImplementedError
if set(compounds).intersection(set(list(input_data.keys()))) != set(compounds):
print('ML dataset does not include all MP formulas!')
print('Cannot perform analysis.')
raise AssertionError
input_data = {c : input_data[c] for c in compounds}
if 'random' in experiment:
random.seed(int(experiment.split('random')[1]))
errors = [mp[c]['Ef'] - input_data[c] for c in compounds]
random.shuffle(errors)
input_data = {compounds[i] : float(mp[compounds[i]]['Ef']+errors[i]) for i in range(len(errors))}
input_data = write_json(input_data, finput)
self.compounds = compounds
self.input_data = input_data
self.data_dir = data_dir
self.experiment = experiment
if nprocs == 'all':
self.nprocs = multip.cpu_count() - 1
else:
self.nprocs = nprocs
end = time()
print('Data looks good.')
print('Time elapsed = %.0f s.' % (end-start))
def ml_hullin(self, remake=False):
"""
generates input file for stability analysis using ML data
Note on computational expense:
-for experiment='allMP', requires ~5 min on 27 processors
-for experiment='LiMnTMO', requires ~30 s on 7 processors
Args:
remake (bool) - repeat generation of file if True; else read file
Returns:
dictionary with ML data that can be processed by hull analysis program
{space (str) : {formula (str) : {'E' : formation energy per atom (float),
'amts' :
{element (str) :
fractional amount of element in formula (float)}}}}
saves dictionary to file
"""
fjson = os.path.join(self.data_dir, 'ml_hullin.json')
if not remake and os.path.exists(fjson):
print('\nReading existing stability input file: %s.' % fjson)
return read_json(fjson)
nprocs = self.nprocs
print('\nGenerating stability input file on %i processors...' % nprocs)
start = time()
compounds = self.compounds
mp_hullin = hullin()
compound_to_space = spaces()
relevant_spaces = list(set([compound_to_space[compound] for compound in compounds]))
ml = self.input_data
pool = multip.Pool(processes=nprocs)
ml_spaces = pool.starmap(_update_hullin_space,
[(ml, mp_hullin, space)
for space in relevant_spaces])
ml_hullin = dict(zip(relevant_spaces, ml_spaces))
end = time()
print('Writing to %s' % fjson)
print('Time elapsed = %.0f s' % (end-start))
return write_json(ml_hullin, fjson)
def ml_hullout(self, remake=False):
"""
generates output file with stability analysis using ML data
Note on computational expense:
-for experiment='allMP', requires ~10 min on 27 processors
-for experiment='LiMnTMO', requires ~30 s on 7 processors
Args:
remake (bool) - repeat generation of file if True; else read file
Returns:
dictionary with stability analysis using ML-predicted formation energies
{formula (str) : {'stability' : True if on the hull, else False,
'Ef' : formation energy per atom (float),
'Ed' : decomposition energy per atom (float),
'rxn' : decomposition reaction (str)}}
saves dictionary to file
"""
fjson = os.path.join(self.data_dir, 'ml_hullout.json')
if not remake and os.path.exists(fjson):
print('\nReading existing stability output file: %s.' % fjson)
return read_json(fjson)
ml_hullin = self.ml_hullin(False)
nprocs = self.nprocs
print('\nGenerating stability output file on %i processors...' % nprocs)
start = time()
compounds = self.compounds
hull_spaces = spaces()
pool = multip.Pool(processes=nprocs)
stabilities = pool.starmap(_assess_stability,
[(ml_hullin, hull_spaces, compound)
for compound in compounds])
ml_hullout = dict(zip(compounds, stabilities))
end = time()
print('Writing to %s.' % fjson)
print('Time elapsed = %.0f s.' % (end-start))
return write_json(ml_hullout, fjson)
def results(self, remake=False):
"""
generates output file with summary data for rapid analysis
Args:
remake (bool) - repeat generation of file if True; else read file
Returns:
dictionary with results in convenient format for analysis
{'stats' : {'Ed' : decomposition energy prediction statistics (dict),
'Ef' : formation energy prediction statistics (dict)},
'data' : {'Ed' : decomposition energy data (list of floats),
'Ef' : formation energy data (list of floats),
'rxns' : decomposition reactions (list of str),
'formulas' : compounds (list of str)}}
"""
fjson = os.path.join(self.data_dir, 'ml_results.json')
if not remake and os.path.exists(fjson):
print('\nReading existing results file: %s' % fjson)
return read_json(fjson)
ml_hullout = self.ml_hullout(False)
print('\nCompiling results...')
start = time()
compounds = self.compounds
mp_hullout = hullout()
mp_hullout = {compound : mp_hullout[compound] for compound in compounds}
obj = StabilitySummary(mp_hullout, ml_hullout)
results = {'stats' : {'Ed' : obj.stats_Ed,
'Ef' : obj.stats_Ef},
'data' : {'Ed' : obj.Ed['pred'],
'Ef' : obj.Ef['pred'],
'rxns' : obj.rxns['pred'],
'formulas' : obj.formulas}}
end = time()
print('Writing to %s.' % fjson)
print('Time elapsed = %.0f s.' % (end-start))
return write_json(results, fjson)
@property
def results_summary(self):
"""
Args:
Returns:
prints a summary of key results
"""
| |
'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'86180563':{'en': 'Xuancheng, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'86180560':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'86180561':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5317\u5e02')},
'861811466':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861811467':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861811464':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861811465':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861807673':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u67f3\u5dde\u5e02')},
'861811463':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861811460':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861811461':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861810331':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861810330':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861810333':{'en': 'Qinhuangdao, Hebei', 'zh': u('\u6cb3\u5317\u7701\u79e6\u7687\u5c9b\u5e02')},
'861810332':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')},
'861810335':{'en': 'Qinhuangdao, Hebei', 'zh': u('\u6cb3\u5317\u7701\u79e6\u7687\u5c9b\u5e02')},
'861810334':{'en': 'Qinhuangdao, Hebei', 'zh': u('\u6cb3\u5317\u7701\u79e6\u7687\u5c9b\u5e02')},
'861770558':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861770559':{'en': 'Huangshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9ec4\u5c71\u5e02')},
'861770556':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861770557':{'en': 'Suzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5bbf\u5dde\u5e02')},
'861770554':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861770555':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861770552':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'861770553':{'en': 'Wuhu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u829c\u6e56\u5e02')},
'861770550':{'en': 'Chuzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6ec1\u5dde\u5e02')},
'861770551':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861802189':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861802188':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861802181':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861802180':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861802183':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861802182':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861802185':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861802184':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861802187':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861802186':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861811682':{'en': 'Bortala, Xinjiang', 'zh': u('\u65b0\u7586\u535a\u5c14\u5854\u62c9\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861811683':{'en': 'Bortala, Xinjiang', 'zh': u('\u65b0\u7586\u535a\u5c14\u5854\u62c9\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861811680':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861811681':{'en': 'Bortala, Xinjiang', 'zh': u('\u65b0\u7586\u535a\u5c14\u5854\u62c9\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861811686':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861811687':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861811684':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u77f3\u6cb3\u5b50\u5e02')},
'861811685':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861803922':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861811688':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861811689':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861769806':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861769807':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861769804':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861769805':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861769802':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861769803':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861769800':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861769801':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861810159':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861810158':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861769808':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861769809':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861807111':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861807110':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861807113':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861807112':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861807115':{'en': 'Xiangyang, Hubei', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')},
'861807114':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861807117':{'en': 'Ezhou, Hubei', 'zh': u('\u6e56\u5317\u7701\u9102\u5dde\u5e02')},
'861807116':{'en': 'Xiangyang, Hubei', 'zh': u('\u6e56\u5317\u7701\u8944\u6a0a\u5e02')},
'861807119':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')},
'861807118':{'en': 'Xiaogan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5b5d\u611f\u5e02')},
'861807775':{'en': 'Qinzhou, Guangxi', 'zh': u('\u5e7f\u897f\u94a6\u5dde\u5e02')},
'861807774':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861771201':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771200':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771203':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771202':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771205':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861771204':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861771207':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771206':{'en': 'HuaiAn, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861771209':{'en': 'Xuzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771208':{'en': 'Xuzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861813358':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861813359':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861800859':{'en': 'Qianxinan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861800858':{'en': 'Liupanshui, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u516d\u76d8\u6c34\u5e02')},
'861800589':{'en': '<NAME>', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')},
'861800588':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861813688':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861813689':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861813680':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861813681':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861813682':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861813683':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861813684':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861813685':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861813686':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861813687':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861813176':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861813177':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861813174':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861813175':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861813172':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861813173':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861813170':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861813171':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861813178':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861813179':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861775141':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861810010':{'en': 'Tianjin', 'zh': u('\u5929\u6d25\u5e02')},
'861802977':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861809974':{'en': 'Aksu, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861803943':{'en': 'L<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6d1b\u9633\u5e02')},
'861801676':{'en': 'Ningde, Fujian', 'zh': u('\u798f\u5efa\u7701\u5b81\u5fb7\u5e02')},
'861801677':{'en': 'Ningde, Fujian', 'zh': u('\u798f\u5efa\u7701\u5b81\u5fb7\u5e02')},
'861801674':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861801675':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861801672':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861801673':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861801670':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861801671':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861801102':{'en': 'Lu<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861801678':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u5b81\u5fb7\u5e02')},
'861801679':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u5b81\u5fb7\u5e02')},
'861801103':{'en': 'Luzhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861813735':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')},
'861774808':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861774809':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861808233':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861808232':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861808235':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861808234':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861808237':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861808236':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861774800':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8fbe\u5dde\u5e02')},
'861774801':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5e7f\u5b89\u5e02')},
'861774802':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5e7f\u5143\u5e02')},
'861774803':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u4e50\u5c71\u5e02')},
'861774804':{'en': 'Luzhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861774805':{'en': 'Mianyang, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861774806':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'861774807':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5b9c\u5bbe\u5e02')},
'86180492':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861809724':{'en': '<NAME>inghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861809725':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861809726':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861809727':{'en': 'Haixi, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u897f\u8499\u53e4\u65cf\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861809720':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861809721':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861809722':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861809723':{'en': 'Xining, Qinghai', 'zh': u('\u9752\u6d77\u7701\u897f\u5b81\u5e02')},
'861809728':{'en': 'Yushu, Qinghai', 'zh': u('\u9752\u6d77\u7701\u7389\u6811\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861809729':{'en': 'Hainan, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u5357\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861808055':{'en': 'Chengdu, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861808054':{'en': 'Chengdu, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861808057':{'en': 'YaAn, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u96c5\u5b89\u5e02')},
'861808056':{'en': 'Chengdu, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861808051':{'en': 'Bazhong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5df4\u4e2d\u5e02')},
'861808050':{'en': 'Bazhong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5df4\u4e2d\u5e02')},
'861808053':{'en': 'Chengdu, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861808052':{'en': 'Bazhong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5df4\u4e2d\u5e02')},
'861808059':{'en': 'YaAn, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u96c5\u5b89\u5e02')},
'861808058':{'en': 'YaAn, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u96c5\u5b89\u5e02')},
'861812168':{'en': 'HuaiAn, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861808363':{'en': 'Zunyi, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9075\u4e49\u5e02')},
'861812169':{'en': 'HuaiAn, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861805242':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861808361':{'en': 'Guiyang, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861808360':{'en': 'Guiyang, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861808367':{'en': 'Bijie, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u6bd5\u8282\u5730\u533a')},
'861808366':{'en': 'Bijie, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u6bd5\u8282\u5730\u533a')},
'861809972':{'en': 'Hami, Xinjiang', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861808365':{'en': 'Zunyi, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9075\u4e49\u5e02')},
'861808364':{'en': 'Zunyi, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9075\u4e49\u5e02')},
'861802372':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861802373':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861802370':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861802371':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861802376':{'en': 'Yunfu, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e91\u6d6e\u5e02')},
'861802377':{'en': 'Yunfu, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e91\u6d6e\u5e02')},
'861802374':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861802375':{'en': 'Yunfu, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e91\u6d6e\u5e02')},
'861802378':{'en': 'Yunfu, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e91\u6d6e\u5e02')},
'861802379':{'en': 'Yunfu, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e91\u6d6e\u5e02')},
'861808369':{'en': 'Liupanshui, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u516d\u76d8\u6c34\u5e02')},
'861804910':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861806376':{'en': 'Longyan, Fujian', 'zh': u('\u798f\u5efa\u7701\u9f99\u5ca9\u5e02')},
'861804912':{'en': 'Yulin, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6986\u6797\u5e02')},
'861804913':{'en': 'Weinan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861804914':{'en': 'Shangluo, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5546\u6d1b\u5e02')},
'861804915':{'en': 'Ankang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b89\u5eb7\u5e02')},
'861804916':{'en': 'Hanzhong, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861806377':{'en': 'Longyan, Fujian', 'zh': u('\u798f\u5efa\u7701\u9f99\u5ca9\u5e02')},
'861804918':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861804919':{'en': 'Tongchuan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u94dc\u5ddd\u5e02')},
'861770392':{'en': 'Hebi, Henan', 'zh': u('\u6cb3\u5357\u7701\u9e64\u58c1\u5e02')},
'861806375':{'en': 'Nanping, Fujian', 'zh': u('\u798f\u5efa\u7701\u5357\u5e73\u5e02')},
'861806372':{'en': 'Nanping, Fujian', 'zh': u('\u798f\u5efa\u7701\u5357\u5e73\u5e02')},
'861770427':{'en': 'Panjin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u76d8\u9526\u5e02')},
'861809988':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861809012':{'en': 'Garze, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u7518\u5b5c\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861806370':{'en': 'Nanping, Fujian', 'zh': u('\u798f\u5efa\u7701\u5357\u5e73\u5e02')},
'861809013':{'en': 'Garze, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u7518\u5b5c\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861809980':{'en': 'Yili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861809981':{'en': 'Yili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861809982':{'en': 'Yili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861806371':{'en': 'Nanping, Fujian', 'zh': u('\u798f\u5efa\u7701\u5357\u5e73\u5e02')},
'861809984':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861809010':{'en': 'YaAn, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u96c5\u5b89\u5e02')},
'861809986':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861809987':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861809011':{'en': 'YaAn, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u96c5\u5b89\u5e02')},
'861809016':{'en': 'Luzhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861775148':{'en': 'Wuxi, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861809017':{'en': 'Luzhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861775149':{'en': 'Wuxi, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861809014':{'en': 'Garze, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u7518\u5b5c\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861809015':{'en': 'Luzhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861809549':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')},
'86180351':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'86180350':{'en': 'Xinzhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')},
'86180353':{'en': 'Yangquan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u9633\u6cc9\u5e02')},
'86180352':{'en': 'Datong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5927\u540c\u5e02')},
'86180355':{'en': 'Changzhi, Shanxi', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')},
'86180354':{'en': 'Jinzhong, Shanxi', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'86180357':{'en': 'Linfen, Shanxi', 'zh': u('\u5c71\u897f\u7701\u4e34\u6c7e\u5e02')},
'86180356':{'en': 'Jincheng, Shanxi', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')},
'86180359':{'en': 'Yuncheng, Shanxi', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')},
'86180358':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')},
'861811143':{'en': 'Zigong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861811142':{'en': 'Zigong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861811145':{'en': 'Leshan, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u4e50\u5c71\u5e02')},
'861811144':{'en': 'Leshan, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u4e50\u5c71\u5e02')},
'861811147':{'en': 'Yibin, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5b9c\u5bbe\u5e02')},
'861811146':{'en': 'Leshan, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u4e50\u5c71\u5e02')},
'861811106':{'en': 'Liangshan, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u51c9\u5c71\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861769121':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861806184':{'en': 'Nanjing, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861769123':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861769122':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861769125':{'en': 'Ankang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b89\u5eb7\u5e02')},
'861769124':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861769127':{'en': 'Baoji, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'861769126':{'en': 'Hanzhong, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861769129':{'en': 'Tongchuan, Shaanxi', 'zh': u('\u9655\u897f\u7701\u94dc\u5ddd\u5e02')},
'861769128':{'en': 'Shangluo, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5546\u6d1b\u5e02')},
'861806189':{'en': 'Xuzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861806188':{'en': 'Nanjing, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861811100':{'en': 'Neijiang, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5185\u6c5f\u5e02')},
'861803139':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861806458':{'en': 'Zhangzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6f33\u5dde\u5e02')},
'861806453':{'en': 'Sanming, Fujian', 'zh': u('\u798f\u5efa\u7701\u4e09\u660e\u5e02')},
'861806452':{'en': 'Sanming, Fujian', 'zh': u('\u798f\u5efa\u7701\u4e09\u660e\u5e02')},
'861806451':{'en': 'Xiamen, Fujian', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'861806450':{'en': 'Xiamen, Fujian', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'861803137':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861806456':{'en': 'Zhangzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6f33\u5dde\u5e02')},
'861803135':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861806454':{'en': 'Zhangzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6f33\u5dde\u5e02')},
'861810421':{'en': 'Chaoyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u671d\u9633\u5e02')},
'861810420':{'en': 'Anshan, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u978d\u5c71\u5e02')},
'861810423':{'en': 'Shenyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u6c88\u9633\u5e02')},
'861810422':{'en': 'Anshan, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u978d\u5c71\u5e02')},
'861807588':{'en': 'Hengyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u8861\u9633\u5e02')},
'861807589':{'en': 'Hengyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u8861\u9633\u5e02')},
'861807586':{'en': 'Changsha, Hunan', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861807587':{'en': 'Hengyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u8861\u9633\u5e02')},
'861807584':{'en': 'Xiangxi, Hunan', 'zh': u('\u6e56\u5357\u7701\u6e58\u897f\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861807585':{'en': 'Changsha, Hunan', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861807582':{'en': 'Yongzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u6c38\u5dde\u5e02')},
'861807583':{'en': 'Xiangxi, Hunan', 'zh': u('\u6e56\u5357\u7701\u6e58\u897f\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861807580':{'en': 'Yongzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u6c38\u5dde\u5e02')},
'861807581':{'en': 'Yongzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u6c38\u5dde\u5e02')},
'861810427':{'en': 'Panjin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u76d8\u9526\u5e02')},
'861810426':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')},
'861770619':{'en': 'Wuxi, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861770618':{'en': 'Wuxi, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861810569':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861770611':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861770610':{'en': 'Zhenjiang, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861770613':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861770612':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861770615':{'en': 'Wuxi, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861770614':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861770617':{'en': 'Wuxi, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861770616':{'en': 'Wuxi, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861812306':{'en': 'Zigong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861812307':{'en': '<NAME>',
expect_ec=ts4_expect_ec)
def M_getInfo(self, _answer_id=0, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Cert.getInfo method call
:param _answer_id: uint32
"""
_r_ = self.C_.call_method('getInfo', {'_answer_id': _answer_id}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_getInfo(self, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Cert.getInfo signed method call
:param _answer_id: uint32
"""
_r_ = self.C_.call_method_signed('getInfo', {'_answer_id': _answer_id}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def getValue(self, index, _answer_id=0, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Cert.getValue
:rtype: address
:param _answer_id: uint32
:param index: int16
"""
return self.G_getValue(index, _answer_id=_answer_id, ts4_expect_ec=ts4_expect_ec)
def G_getValue(self, index, _answer_id=0, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Cert.getValue getter
:rtype: address
:param _answer_id: uint32
:param index: int16
"""
return self.C_.call_getter('getValue', {'_answer_id': _answer_id, 'index': index}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_getValue(self, index, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Cert.getValue raw getter
:rtype: address
:param _answer_id: uint32
:param index: int16
"""
return self.C_.call_getter_raw('getValue', {'_answer_id': _answer_id, 'index': index}, expect_ec=ts4_expect_ec)
def M_getValue(self, index, _answer_id=0, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Cert.getValue method call
:param _answer_id: uint32
:param index: int16
"""
_r_ = self.C_.call_method('getValue', {'_answer_id': _answer_id, 'index': index}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_getValue(self, index, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Cert.getValue signed method call
:param _answer_id: uint32
:param index: int16
"""
_r_ = self.C_.call_method_signed('getValue', {'_answer_id': _answer_id, 'index': index}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def setValue(self, index, new_value, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Cert.setValue
:rtype:
:param index: int16
:param new_value: address
"""
if ts4_sign:
return self.S_setValue(index, new_value, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_setValue(index, new_value, ts4_expect_ec=ts4_expect_ec)
def G_setValue(self, index, new_value, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Cert.setValue getter
:rtype:
:param index: int16
:param new_value: address
"""
return self.C_.call_getter('setValue', {'index': index, 'new_value': new_value}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_setValue(self, index, new_value, ts4_expect_ec=0):
"""
Wrapper for D4Cert.setValue raw getter
:rtype:
:param index: int16
:param new_value: address
"""
return self.C_.call_getter_raw('setValue', {'index': index, 'new_value': new_value}, expect_ec=ts4_expect_ec)
def M_setValue(self, index, new_value, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Cert.setValue method call
:param index: int16
:param new_value: address
"""
_r_ = self.C_.call_method('setValue', {'index': index, 'new_value': new_value}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_setValue(self, index, new_value, ts4_expect_ec=0):
"""
Wrapper for D4Cert.setValue signed method call
:param index: int16
:param new_value: address
"""
_r_ = self.C_.call_method_signed('setValue', {'index': index, 'new_value': new_value}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
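# Hand-written usage sketch (an assumption, not generator output): shows how the
# setValue/getValue wrappers above are typically driven from a test, and how
# ts4_sign routes setValue to its signed (S_) or plain (M_) variant.
def example_set_and_get_value(self, new_addr):
    """
    Usage sketch only. 'new_addr' is assumed to be an address value accepted by
    D4Cert.setValue; slot index 0 is an arbitrary example.
    """
    self.setValue(0, new_addr, ts4_sign=True)  # dispatches to S_setValue (signed external call)
    return self.getValue(0)                    # always routed to G_getValue (off-chain getter)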
def resetValue(self, index, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Cert.resetValue
:rtype:
:param index: int16
"""
if ts4_sign:
return self.S_resetValue(index, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_resetValue(index, ts4_expect_ec=ts4_expect_ec)
def G_resetValue(self, index, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Cert.resetValue getter
:rtype:
:param index: int16
"""
return self.C_.call_getter('resetValue', {'index': index}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_resetValue(self, index, ts4_expect_ec=0):
"""
Wrapper for D4Cert.resetValue raw getter
:rtype:
:param index: int16
"""
return self.C_.call_getter_raw('resetValue', {'index': index}, expect_ec=ts4_expect_ec)
def M_resetValue(self, index, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Cert.resetValue method call
:param index: int16
"""
_r_ = self.C_.call_method('resetValue', {'index': index}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_resetValue(self, index, ts4_expect_ec=0):
"""
Wrapper for D4Cert.resetValue signed method call
:param index: int16
"""
_r_ = self.C_.call_method_signed('resetValue', {'index': index}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def clearValues(self, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Cert.clearValues
:rtype:
"""
if ts4_sign:
return self.S_clearValues(ts4_expect_ec=ts4_expect_ec)
else:
return self.M_clearValues(ts4_expect_ec=ts4_expect_ec)
def G_clearValues(self, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Cert.clearValues getter
:rtype:
"""
return self.C_.call_getter('clearValues', {}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_clearValues(self, ts4_expect_ec=0):
"""
Wrapper for D4Cert.clearValues raw getter
:rtype:
"""
return self.C_.call_getter_raw('clearValues', {}, expect_ec=ts4_expect_ec)
def M_clearValues(self, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Cert.clearValues method call
"""
_r_ = self.C_.call_method('clearValues', {}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_clearValues(self, ts4_expect_ec=0):
"""
Wrapper for D4Cert.clearValues signed method call
"""
_r_ = self.C_.call_method_signed('clearValues', {}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def registrationPreflight(self, requestId, _answer_id=0, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Cert.registrationPreflight
:rtype: uint128, uint32
:param _answer_id: uint32
:param requestId: uint128
"""
return self.G_registrationPreflight(requestId, _answer_id=_answer_id, ts4_expect_ec=ts4_expect_ec)
def G_registrationPreflight(self, requestId, _answer_id=0, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Cert.registrationPreflight getter
:rtype: uint128, uint32
:param _answer_id: uint32
:param requestId: uint128
"""
return self.C_.call_getter('registrationPreflight', {'_answer_id': _answer_id, 'requestId': requestId}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_registrationPreflight(self, requestId, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Cert.registrationPreflight raw getter
:rtype: uint128, uint32
:param _answer_id: uint32
:param requestId: uint128
"""
return self.C_.call_getter_raw('registrationPreflight', {'_answer_id': _answer_id, 'requestId': requestId}, expect_ec=ts4_expect_ec)
def M_registrationPreflight(self, requestId, _answer_id=0, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Cert.registrationPreflight method call
:param _answer_id: uint32
:param requestId: uint128
"""
_r_ = self.C_.call_method('registrationPreflight', {'_answer_id': _answer_id, 'requestId': requestId}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_registrationPreflight(self, requestId, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Cert.registrationPreflight signed method call
:param _answer_id: uint32
:param requestId: uint128
"""
_r_ = self.C_.call_method_signed('registrationPreflight', {'_answer_id': _answer_id, 'requestId': requestId}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def applyAuctionResult(self, new_owner, new_expiry, _answer_id=0, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Cert.applyAuctionResult
:rtype: bool
:param _answer_id: uint32
:param new_owner: address
:param new_expiry: uint32
"""
return self.G_applyAuctionResult(new_owner, new_expiry, _answer_id=_answer_id, ts4_expect_ec=ts4_expect_ec)
def G_applyAuctionResult(self, new_owner, new_expiry, _answer_id=0, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Cert.applyAuctionResult getter
:rtype: bool
:param _answer_id: uint32
:param new_owner: address
:param new_expiry: uint32
"""
return self.C_.call_getter('applyAuctionResult', {'_answer_id': _answer_id, 'new_owner': new_owner, 'new_expiry': new_expiry}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_applyAuctionResult(self, new_owner, new_expiry, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Cert.applyAuctionResult raw getter
:rtype: bool
:param _answer_id: uint32
:param new_owner: address
:param new_expiry: uint32
"""
return self.C_.call_getter_raw('applyAuctionResult', {'_answer_id': _answer_id, 'new_owner': new_owner, 'new_expiry': new_expiry}, expect_ec=ts4_expect_ec)
def M_applyAuctionResult(self, new_owner, new_expiry, _answer_id=0, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Cert.applyAuctionResult method call
:param _answer_id: uint32
:param new_owner: address
:param new_expiry: uint32
"""
_r_ = self.C_.call_method('applyAuctionResult', {'_answer_id': _answer_id, 'new_owner': new_owner, 'new_expiry': new_expiry}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_applyAuctionResult(self, new_owner, new_expiry, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Cert.applyAuctionResult signed method call
:param _answer_id: uint32
:param new_owner: address
:param new_expiry: uint32
"""
_r_ = self.C_.call_method_signed('applyAuctionResult', {'_answer_id': _answer_id, 'new_owner': new_owner, 'new_expiry': new_expiry}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def ensureExpiry(self, expiry, retval, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Cert.ensureExpiry
:rtype:
:param expiry: uint32
:param retval: bool
"""
if ts4_sign:
return self.S_ensureExpiry(expiry, retval, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_ensureExpiry(expiry, retval, ts4_expect_ec=ts4_expect_ec)
def G_ensureExpiry(self, expiry, retval, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Cert.ensureExpiry getter
:rtype:
:param expiry: uint32
:param retval: bool
"""
return self.C_.call_getter('ensureExpiry', {'expiry': expiry, 'retval': retval}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_ensureExpiry(self, expiry, retval, ts4_expect_ec=0):
"""
Wrapper for D4Cert.ensureExpiry raw getter
:rtype:
:param expiry: uint32
:param retval: bool
"""
return self.C_.call_getter_raw('ensureExpiry', {'expiry': expiry, 'retval': retval}, expect_ec=ts4_expect_ec)
def M_ensureExpiry(self, expiry, retval, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Cert.ensureExpiry method call
:param expiry: uint32
:param retval: bool
"""
_r_ = self.C_.call_method('ensureExpiry', {'expiry': expiry, 'retval': retval}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_ensureExpiry(self, expiry, retval, ts4_expect_ec=0):
"""
Wrapper for D4Cert.ensureExpiry signed method call
:param expiry: uint32
:param retval: bool
"""
_r_ = self.C_.call_method_signed('ensureExpiry', {'expiry': expiry, 'retval': retval}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def getOwner(self, _answer_id=0, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Cert.getOwner
:rtype: address
:param _answer_id: uint32
"""
return self.G_getOwner(_answer_id=_answer_id, ts4_expect_ec=ts4_expect_ec)
def G_getOwner(self, _answer_id=0, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Cert.getOwner getter
:rtype: address
:param _answer_id: uint32
"""
return self.C_.call_getter('getOwner', {'_answer_id': _answer_id}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_getOwner(self, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Cert.getOwner raw getter
:rtype: address
:param _answer_id: uint32
"""
return self.C_.call_getter_raw('getOwner', {'_answer_id': _answer_id}, expect_ec=ts4_expect_ec)
def M_getOwner(self, _answer_id=0, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Cert.getOwner method call
:param _answer_id: uint32
"""
_r_ = self.C_.call_method('getOwner', {'_answer_id': _answer_id}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_getOwner(self, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Cert.getOwner signed method call
:param _answer_id: uint32
"""
_r_ = self.C_.call_method_signed('getOwner', {'_answer_id': _answer_id}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def getPendingOwner(self, _answer_id=0, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Cert.getPendingOwner
:rtype: address
:param _answer_id: uint32
"""
return self.G_getPendingOwner(_answer_id=_answer_id, ts4_expect_ec=ts4_expect_ec)
def G_getPendingOwner(self, _answer_id=0, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Cert.getPendingOwner getter
:rtype: address
:param _answer_id: uint32
"""
return self.C_.call_getter('getPendingOwner', {'_answer_id': _answer_id}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_getPendingOwner(self, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Cert.getPendingOwner raw getter
:rtype: address
:param _answer_id: uint32
"""
return self.C_.call_getter_raw('getPendingOwner', {'_answer_id': _answer_id}, expect_ec=ts4_expect_ec)
def M_getPendingOwner(self, _answer_id=0, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Cert.getPendingOwner method call
:param _answer_id: uint32
"""
_r_ = self.C_.call_method('getPendingOwner', {'_answer_id': _answer_id}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
from __future__ import absolute_import
import re
import os
import time
import zlib
import math
import struct
import threading
import colorsys
from functools import partial
import sublime
import sublime_plugin
from .settings import Settings, SettingTogglerCommandMixin
from .colorizer import SchemaColorizer, all_names_to_hex, names_to_hex, xterm_to_hex, xterm8_to_hex, xterm8b_to_hex, xterm8f_to_hex
NAME = "Color Highlight"
VERSION = "1.2.0"
# Color formats:
# #000000FF
# #FFFFFF
# #FFF7
# #FFF
# rgb(255,255,255)
# rgba(255, 255, 255, 1)
# rgba(255, 255, 255, .2)
# rgba(255, 255, 255, 0.5)
# black
# rgba(white, 20%)
# 0xFFFFFF
# hsl(360, 0%, 50%)
# hsla(360, 0%, 50%, 0.5)
# hwb(360, 50%, 50%)
# lab(100, 100, 100) <-> #ff9331
# lch(100, 100, 100) <-> #ffff00
# hsv(40, 70%, 100%) <-> #ffc34d
# \033[31m
# \033[38;5;22m
# \033[38;2;0;0;255m
regex_cache = {}
re_cache = {}
def regex_factory(
named_values,
x_hex_values,
hex_values,
xterm_color_values,
rgb_values,
hsv_values,
hsl_values,
hwb_values,
lab_values,
lch_values,
):
key = (
named_values,
x_hex_values,
hex_values,
xterm_color_values,
rgb_values,
hsv_values,
hsl_values,
hwb_values,
lab_values,
lch_values,
)
try:
colors_regex, colors_regex_capture = regex_cache[key]
except KeyError:
function_colors = []
if rgb_values:
function_colors.extend([r'rgb', r'rgba'])
if hsv_values:
function_colors.extend([r'hsv', r'hsva'])
if hsl_values:
function_colors.extend([r'hsl', r'hsla'])
if hwb_values:
function_colors.append(r'hwb')
if lab_values:
function_colors.append(r'lab')
if lch_values:
function_colors.append(r'lch')
simple_colors = []
if named_values:
simple_colors.append(r'(?<![-.\w])%s(?![-.\w])' % r'(?![-.\w])|(?<![-.\w])'.join(names_to_hex.keys()))
if x_hex_values and hex_values:
simple_colors.append(r'(?:#|0x)[0-9a-fA-F]{8}\b')
simple_colors.append(r'(?:#|0x)[0-9a-fA-F]{6}\b')
simple_colors.append(r'#[0-9a-fA-F]{4}\b')
simple_colors.append(r'#[0-9a-fA-F]{3}\b')
elif x_hex_values:
simple_colors.append(r'#[0-9a-fA-F]{8}\b')
simple_colors.append(r'#[0-9a-fA-F]{6}\b')
simple_colors.append(r'#[0-9a-fA-F]{4}\b')
simple_colors.append(r'#[0-9a-fA-F]{3}\b')
elif hex_values:
simple_colors.append(r'0x[0-9a-fA-F]{8}\b')
simple_colors.append(r'0x[0-9a-fA-F]{6}\b')
if xterm_color_values:
simple_colors.append(r'(?:\x1b|\\033|\\x1b|\\u001b|\\e|\\E)\[\d{1,3}(?:;\d{1,3})*m')
colors_regex = []
if function_colors:
num = r'\s*([-+]?(?:[0-9]*\.\d+|[0-9]+)(?:%|deg)?)\s*'
sc = r'|(%s)' % r'|'.join(simple_colors) if simple_colors else r''
colors_regex.append(r'(%s)\((?:%s,%s,%s%s)(?:,%s)?\)' % (r'|'.join(function_colors), num, num, num, sc, num))
if simple_colors:
colors_regex.append(r'(%s)' % r'|'.join(simple_colors))
colors_regex = r'|'.join(colors_regex)
if function_colors and simple_colors:
colors_regex_capture = r'\1|\2\5\7,\3,\4,\6'
elif function_colors:
colors_regex_capture = r'\1|\2,\3,\4,\5'
elif simple_colors:
colors_regex_capture = r'|\1'
else:
colors_regex_capture = ''
regex_cache[key] = colors_regex, colors_regex_capture
return colors_regex, colors_regex_capture
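# Hedged usage sketch for regex_factory (an illustration, not used by the plugin
# itself): build the combined pattern with every format from the list above enabled
# and collect the raw color tokens found in a piece of text.
def _example_find_colors(text):
    colors_regex, _ = regex_factory(
        named_values=True, x_hex_values=True, hex_values=True,
        xterm_color_values=True, rgb_values=True, hsv_values=True,
        hsl_values=True, hwb_values=True, lab_values=True, lch_values=True,
    )
    # _example_find_colors("color: #1a2b3c; border: rgb(255, 0, 0)") should yield
    # something like ['#1a2b3c', 'rgb(255, 0, 0)'].
    return [m.group(0) for m in re.compile(colors_regex).finditer(text)]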
def re_factory(
named_values,
x_hex_values,
hex_values,
xterm_color_values,
rgb_values,
hsv_values,
hsl_values,
hwb_values,
lab_values,
lch_values,
):
key = (
named_values,
x_hex_values,
hex_values,
xterm_color_values,
rgb_values,
hsv_values,
hsl_values,
hwb_values,
lab_values,
lch_values,
)
try:
colors_re, colors_re_capture = re_cache[key]
except KeyError:
colors_regex, colors_regex_capture = regex_factory(
named_values=named_values,
x_hex_values=x_hex_values,
hex_values=hex_values,
xterm_color_values=xterm_color_values,
rgb_values=rgb_values,
hsv_values=hsv_values,
hsl_values=hsl_values,
hwb_values=hwb_values,
lab_values=lab_values,
lch_values=lch_values,
)
colors_re = re.compile(colors_regex)
colors_re_capture = re.sub(r'\\([0-9])', lambda m: chr(int(m.group(1))), colors_regex_capture)
re_cache[key] = colors_re, colors_re_capture
return colors_re, colors_re_capture
def hsv_to_rgb(h, s, v):
# h -> [0, 360)
# s -> [0, 100]
# v -> [0, 100]
H = h / 360.0
S = s / 100.0
V = v / 100.0
RR, GG, BB = colorsys.hsv_to_rgb(H, S, V)
return int(RR * 255), int(GG * 255), int(BB * 255)
def hsl_to_rgb(h, s, l):
# h -> [0, 360)
# s -> [0, 100]
# l -> [0, 100]
H = h / 360.0
S = s / 100.0
L = l / 100.0
RR, GG, BB = colorsys.hls_to_rgb(H, L, S)
return int(RR * 255), int(GG * 255), int(BB * 255)
def hwb_to_rgb(h, w, b):
# h -> [0, 360)
# w -> [0, 100]
# b -> [0, 100]
H = h / 360.0
W = w / 100.0
B = b / 100.0
RR, GG, BB = colorsys.hls_to_rgb(H, 0.5, 1)
RR = RR * (1 - W - B) + W
GG = GG * (1 - W - B) + W
BB = BB * (1 - W - B) + W
r, g, b = int(RR * 255), int(GG * 255), int(BB * 255)
r = 0 if r < 0 else 255 if r > 255 else r
g = 0 if g < 0 else 255 if g > 255 else g
b = 0 if b < 0 else 255 if b > 255 else b
return r, g, b
def lab_to_rgb(L, a, b):
# L -> [0, 100]
# a -> [-160, 160]
# b -> [-160, 160]
Y = (L + 16.0) / 116.0
X = a / 500.0 + Y
Z = Y - b / 200.0
Y3 = Y ** 3.0
Y = Y3 if Y3 > 0.008856 else (Y - 16.0 / 116.0) / 7.787
X3 = X ** 3.0
X = X3 if X3 > 0.008856 else (X - 16.0 / 116.0) / 7.787
Z3 = Z ** 3.0
Z = Z3 if Z3 > 0.008856 else (Z - 16.0 / 116.0) / 7.787
# Normalize white point for Observer=2°, Illuminant=D65
X *= 0.95047
Y *= 1.0
Z *= 1.08883
# XYZ to RGB
RR = X * 3.240479 + Y * -1.537150 + Z * -0.498535
GG = X * -0.969256 + Y * 1.875992 + Z * 0.041556
BB = X * 0.055648 + Y * -0.204043 + Z * 1.057311
RR = 1.055 * RR ** (1 / 2.4) - 0.055 if RR > 0.0031308 else 12.92 * RR
GG = 1.055 * GG ** (1 / 2.4) - 0.055 if GG > 0.0031308 else 12.92 * GG
BB = 1.055 * BB ** (1 / 2.4) - 0.055 if BB > 0.0031308 else 12.92 * BB
r, g, b = int(RR * 255), int(GG * 255), int(BB * 255)
r = 0 if r < 0 else 255 if r > 255 else r
g = 0 if g < 0 else 255 if g > 255 else g
b = 0 if b < 0 else 255 if b > 255 else b
return r, g, b
def lch_to_lab(L, c, h):
# L -> [0, 100]
# c -> [0, 230]
# h -> [0, 360)
a = c * math.cos(math.radians(h))
b = c * math.sin(math.radians(h))
return L, a, b
def lch_to_rgb(L, c, h):
L, a, b = lch_to_lab(L, c, h)
return lab_to_rgb(L, a, b)
def tohex(r, g, b, a):
if g is not None and b is not None:
sr = '%X' % r
if len(sr) == 1:
sr = '0' + sr
sg = '%X' % g
if len(sg) == 1:
sg = '0' + sg
sb = '%X' % b
if len(sb) == 1:
sb = '0' + sb
else:
sr = r[1:3]
sg = r[3:5]
sb = r[5:7]
sa = '%X' % int(a / 100.0 * 255)
if len(sa) == 1:
sa = '0' + sa
return '#%s%s%s%s' % (sr, sg, sb, sa)
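# Small illustrative helper (not called anywhere in the plugin): spot-checks the
# converters above against the equivalences noted in the format list near the top
# of the file, e.g. lab(100, 100, 100) ~ #ff9331 and hsv(40, 70%, 100%) ~ #ffc34d.
def _demo_color_conversions():
    for label, rgb in (
        ('lab(100, 100, 100)', lab_to_rgb(100, 100, 100)),
        ('lch(100, 100, 100)', lch_to_rgb(100, 100, 100)),
        ('hsv(40, 70, 100)', hsv_to_rgb(40, 70, 100)),
        ('hsl(360, 0, 50)', hsl_to_rgb(360, 0, 50)),
    ):
        print(label, '->', tohex(rgb[0], rgb[1], rgb[2], 100))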
# Full PNG is: PNG_HEAD + PNG_IHDR + PNG_IDAT[mode] + PNG_IEND
PNG_HEAD = b'\x89PNG\r\n\x1a\n'
PNG_IHDR = b'\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4'
PNG_IDAT = {
'circle': b'\x00\x00\x01\x13IDATx\x9c\xed\xd6\xc1\r\xc3 \x0c@QX!g\xa4\x8c\xd0\x11:BF\xe8\x01q\xee\x1c\xdd\x82e2\x00\xb30\x00\xb5U#U\x11\x85`\xac\xe6\xc2\xe1_\xc3K\x93\xd8U)%ue\x97\x1e>\x01\x13P\x05\xac\xb7{)\x03Y\xc8C\x01\x8a\xdb\xe3\x89\x05\xc8C\x162\x90:6\n\xd0\x90\x83v(}\x07\x17?\xb6C\x0e\xd2R\x80\x05z\x1d\x0f\xae\x00r/h\x19\x05\xe8\xda\xe1\r@F\xe8\x11\x80\xab\x1d~\x02\x90\xe8q\xb0\x00\xa6\xf4\xcc\x19\x00|\'\x0c\x07`[\x87\x9f\x04`\x96\x03\xf0\x82\x00\xcf\x01\x04A@\xe0\x00\xa2 v\x03h\xc25/~\x06\x897\xc3\x01\x04A@\xff#\xa0\xd9.\x05\xe8\x7f\ti\xb1H\x01\xfa?\xc3\xed\xb3\xd5v\x01\x00\x0e\xb3\xfeADK\xc4\t\x00p\x9c\xf7\x8fb\x02hZ(\\\x00.2=\x02\xc0\x96\x1a\xa2q8\xaer5\n\xc8\xbf\x84+\xbd\x13?\x9e\xb9\xcbw.\x05\xc8\x19\xfa:<\xcd\x89H\x133\xd0\xee\xc0\x05f\xd6\xc2\xdf\xb9n\xc0\xbf\x9a\x80\t\xb8\x1c\xf0\x06-\x9f\xcd\xf4\x17\xe9(\x03',
'square': b'\x00\x00\x00\x4aIDATx\x9c\xed\xceA\r\x00 \x0cC\xd19A\x02\x12\x90\x80\x04$\xe0\xff\xd49 =\xb1,\xf9\x87\x7fm_H\x8a\xcaJ\xcf\x01\x00x\x02\xc6\\r\xda\xe7Z\x01\x00\x00\x00@?\x80;\xecB\x01\x00\x00\x00\xa0\x1f\xe0W\x00\x00\x94\x03\x12\\\xf0$\x87\xd4i\x0c\x98',
'fill': b'\x00\x00\x00\x40IDATx\x9c\xed\xcf1\x11\x00 \x10\x03\xc1w\x82\x04$ \x01\tH\xc0\x7f\x05"R|\xb3\xc5\xb5\x99M\x8d\xb9^\xd2>7\xaa\x00\x00\x00\x00\x00\x00\x00\xda\x01\xe9@z\x00\x00\x00\x00\x00\x00\x00\xa0\x1d\xf0\x01\xb4]Pj]\x9av\xf7',
}
PNG_IEND = b'\x00\x00\x00\x00IEND\xaeB`\x82'
PNG_RE = re.compile(b'\\x1f\\x2f\\x3f|\\x4f\\x5f\\x6f')
PNG_DATA = {
'circle': zlib.decompress(PNG_IDAT['circle'][8:-4]),
'square': zlib.decompress(PNG_IDAT['square'][8:-4]),
'fill': zlib.decompress(PNG_IDAT['fill'][8:-4]),
}
DEFAULT_GUTTER_ICON = 'circle'
def toicon(name, gutter_icon=True, light=True):
base_path = os.path.join(sublime.packages_path(), 'User', '%s.cache' % NAME)
if not os.path.exists(base_path):
os.mkdir(base_path)
if gutter_icon not in PNG_DATA:
gutter_icon = DEFAULT_GUTTER_ICON
icon_path = os.path.join(base_path, name + '_' + gutter_icon + '.png')
if not os.path.exists(icon_path):
r = int(name[4:6], 16)
g = int(name[6:8], 16)
b = int(name[8:10], 16)
a = int(name[10:12] or 'ff', 16) / 255.0
# print("r={} g={} b={} a={}".format(r, g, b, a))
if light:
x = 0xff * (1 - a)
y = 0xcc * (1 - a)
else:
x = 0x99 * (1 - a)
y = 0x66 * (1 - a)
r *= a
g *= a
b *= a
# print("x(r={} g={} b={}), y(r={} g={} b={})".format(int(r + x), int(g + x), int(b + x), int(r + y), int(g + y), int(b + y)))
I1 = lambda v: struct.pack("!B", v & (2**8 - 1))
I4 = lambda v: struct.pack("!I", v & (2**32 - 1))
png = PNG_HEAD + PNG_IHDR
col_map = {
b'\x1f\x2f\x3f': I1(int(r + x)) + I1(int(g + x)) + I1(int(b + x)),
b'\x4f\x5f\x6f': I1(int(r + y)) + I1(int(g + y)) + I1(int(b + y)),
}
data = PNG_RE.sub(lambda m: col_map[m.group(0)], PNG_DATA[gutter_icon])
compressed = zlib.compress(data)
idat = b'IDAT' + compressed
png += I4(len(compressed)) + idat + I4(zlib.crc32(idat))
png += PNG_IEND
with open(icon_path, 'wb') as fp:
fp.write(png)
relative_icon_path = os.path.relpath(icon_path, os.path.dirname(sublime.packages_path()))
relative_icon_path = relative_icon_path.replace('\\', '/')
return relative_icon_path
# Commands
# treat hex vals as colors
class ColorHighlightCommand(sublime_plugin.WindowCommand):
def run_(self, edit_token, args={}):
view = self.window.active_view()
view.run_command('color_highlight', args)
def is_enabled(self):
return True
class ColorHighlightEnableLoadSaveCommand(ColorHighlightCommand):
def is_enabled(self):
enabled = super(ColorHighlightEnableLoadSaveCommand, self).is_enabled()
if enabled:
if settings.get('highlight') == 'load-save':
return False
return enabled
class ColorHighlightEnableSaveOnlyCommand(ColorHighlightCommand):
def is_enabled(self):
enabled = super(ColorHighlightEnableSaveOnlyCommand, self).is_enabled()
if enabled:
if settings.get('highlight') == 'save-only':
return False
return enabled
class ColorHighlightDisableCommand(ColorHighlightCommand):
def is_enabled(self):
enabled = super(ColorHighlightDisableCommand, self).is_enabled()
if enabled:
if settings.get('highlight') is
Listeners=[{"Protocol": "http", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
client.register_instances_with_load_balancer(
LoadBalancerName="my-lb",
Instances=[{"InstanceId": instance_id1}, {"InstanceId": instance_id2}],
)
balancer = client.describe_load_balancers()["LoadBalancerDescriptions"][0]
instance_ids = [instance["InstanceId"] for instance in balancer["Instances"]]
set(instance_ids).should.equal(set([instance_id1, instance_id2]))
# Has boto3 equivalent
@mock_ec2_deprecated
@mock_elb_deprecated
def test_deregister_instances():
ec2_conn = boto.connect_ec2()
reservation = ec2_conn.run_instances(EXAMPLE_AMI_ID, 2)
instance_id1 = reservation.instances[0].id
instance_id2 = reservation.instances[1].id
conn = boto.connect_elb()
ports = [(80, 8080, "http"), (443, 8443, "tcp")]
lb = conn.create_load_balancer("my-lb", [], ports)
lb.register_instances([instance_id1, instance_id2])
balancer = conn.get_all_load_balancers()[0]
balancer.instances.should.have.length_of(2)
balancer.deregister_instances([instance_id1])
balancer.instances.should.have.length_of(1)
balancer.instances[0].id.should.equal(instance_id2)
@mock_ec2
@mock_elb
def test_deregister_instances_boto3():
ec2 = boto3.resource("ec2", region_name="us-east-1")
response = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=2, MaxCount=2)
instance_id1 = response[0].id
instance_id2 = response[1].id
client = boto3.client("elb", region_name="us-east-1")
client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "http", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
client.register_instances_with_load_balancer(
LoadBalancerName="my-lb",
Instances=[{"InstanceId": instance_id1}, {"InstanceId": instance_id2}],
)
balancer = client.describe_load_balancers()["LoadBalancerDescriptions"][0]
balancer["Instances"].should.have.length_of(2)
client.deregister_instances_from_load_balancer(
LoadBalancerName="my-lb", Instances=[{"InstanceId": instance_id1}]
)
balancer = client.describe_load_balancers()["LoadBalancerDescriptions"][0]
balancer["Instances"].should.have.length_of(1)
balancer["Instances"][0]["InstanceId"].should.equal(instance_id2)
# Has boto3 equivalent
@mock_elb_deprecated
def test_default_attributes():
conn = boto.connect_elb()
ports = [(80, 8080, "http"), (443, 8443, "tcp")]
lb = conn.create_load_balancer("my-lb", [], ports)
attributes = lb.get_attributes()
attributes.cross_zone_load_balancing.enabled.should.be.false
attributes.connection_draining.enabled.should.be.false
attributes.access_log.enabled.should.be.false
attributes.connecting_settings.idle_timeout.should.equal(60)
@mock_elb
def test_default_attributes_boto3():
lb_name = str(uuid4())[0:6]
client = boto3.client("elb", region_name="us-east-1")
client.create_load_balancer(
LoadBalancerName=lb_name,
Listeners=[{"Protocol": "http", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a"],
)
attributes = client.describe_load_balancer_attributes(LoadBalancerName=lb_name)[
"LoadBalancerAttributes"
]
attributes.should.have.key("CrossZoneLoadBalancing").equal({"Enabled": False})
attributes.should.have.key("AccessLog").equal({"Enabled": False})
attributes.should.have.key("ConnectionDraining").equal({"Enabled": False})
attributes.should.have.key("ConnectionSettings").equal({"IdleTimeout": 60})
# Has boto3 equivalent
@mock_elb_deprecated
def test_cross_zone_load_balancing_attribute():
conn = boto.connect_elb()
ports = [(80, 8080, "http"), (443, 8443, "tcp")]
lb = conn.create_load_balancer("my-lb", [], ports)
conn.modify_lb_attribute("my-lb", "CrossZoneLoadBalancing", True)
attributes = lb.get_attributes(force=True)
attributes.cross_zone_load_balancing.enabled.should.be.true
conn.modify_lb_attribute("my-lb", "CrossZoneLoadBalancing", False)
attributes = lb.get_attributes(force=True)
attributes.cross_zone_load_balancing.enabled.should.be.false
@mock_elb
def test_cross_zone_load_balancing_attribute_boto3():
lb_name = str(uuid4())[0:6]
client = boto3.client("elb", region_name="us-east-1")
client.create_load_balancer(
LoadBalancerName=lb_name,
Listeners=[{"Protocol": "http", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a"],
)
client.modify_load_balancer_attributes(
LoadBalancerName=lb_name,
LoadBalancerAttributes={"CrossZoneLoadBalancing": {"Enabled": True}},
)
attributes = client.describe_load_balancer_attributes(LoadBalancerName=lb_name)[
"LoadBalancerAttributes"
]
# Bug: This property is not properly propagated
attributes.should.have.key("CrossZoneLoadBalancing").equal({"Enabled": False})
attributes.should.have.key("AccessLog").equal({"Enabled": False})
attributes.should.have.key("ConnectionDraining").equal({"Enabled": False})
attributes.should.have.key("ConnectionSettings").equal({"IdleTimeout": 60})
client.modify_load_balancer_attributes(
LoadBalancerName=lb_name,
LoadBalancerAttributes={"CrossZoneLoadBalancing": {"Enabled": False}},
)
attributes = client.describe_load_balancer_attributes(LoadBalancerName=lb_name)[
"LoadBalancerAttributes"
]
attributes.should.have.key("CrossZoneLoadBalancing").equal({"Enabled": False})
# Has boto3 equivalent
@mock_elb_deprecated
def test_connection_draining_attribute():
conn = boto.connect_elb()
ports = [(80, 8080, "http"), (443, 8443, "tcp")]
lb = conn.create_load_balancer("my-lb", [], ports)
connection_draining = ConnectionDrainingAttribute()
connection_draining.enabled = True
connection_draining.timeout = 60
conn.modify_lb_attribute("my-lb", "ConnectionDraining", connection_draining)
attributes = lb.get_attributes(force=True)
attributes.connection_draining.enabled.should.be.true
attributes.connection_draining.timeout.should.equal(60)
connection_draining.timeout = 30
conn.modify_lb_attribute("my-lb", "ConnectionDraining", connection_draining)
attributes = lb.get_attributes(force=True)
attributes.connection_draining.timeout.should.equal(30)
connection_draining.enabled = False
conn.modify_lb_attribute("my-lb", "ConnectionDraining", connection_draining)
attributes = lb.get_attributes(force=True)
attributes.connection_draining.enabled.should.be.false
@mock_elb
def test_connection_draining_attribute_boto3():
lb_name = str(uuid4())[0:6]
client = boto3.client("elb", region_name="us-east-1")
client.create_load_balancer(
LoadBalancerName=lb_name,
Listeners=[{"Protocol": "http", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a"],
)
client.modify_load_balancer_attributes(
LoadBalancerName=lb_name,
LoadBalancerAttributes={"ConnectionDraining": {"Enabled": True, "Timeout": 42}},
)
attributes = client.describe_load_balancer_attributes(LoadBalancerName=lb_name)[
"LoadBalancerAttributes"
]
attributes.should.have.key("ConnectionDraining").equal(
{"Enabled": True, "Timeout": 42}
)
client.modify_load_balancer_attributes(
LoadBalancerName=lb_name,
LoadBalancerAttributes={"ConnectionDraining": {"Enabled": False}},
)
attributes = client.describe_load_balancer_attributes(LoadBalancerName=lb_name)[
"LoadBalancerAttributes"
]
attributes.should.have.key("ConnectionDraining").equal({"Enabled": False})
# This does not work in Boto3, so we can't write an equivalent test
# Moto always looks for attribute 's3_bucket_name', but Boto3 sends 'S3BucketName'
# We'll need to rewrite this feature completely anyway, to get rid of the boto-objects
@mock_elb_deprecated
def test_access_log_attribute():
conn = boto.connect_elb()
ports = [(80, 8080, "http"), (443, 8443, "tcp")]
lb = conn.create_load_balancer("my-lb", [], ports)
access_log = AccessLogAttribute()
access_log.enabled = True
access_log.s3_bucket_name = "bucket"
access_log.s3_bucket_prefix = "prefix"
access_log.emit_interval = 60
conn.modify_lb_attribute("my-lb", "AccessLog", access_log)
attributes = lb.get_attributes(force=True)
attributes.access_log.enabled.should.be.true
attributes.access_log.s3_bucket_name.should.equal("bucket")
attributes.access_log.s3_bucket_prefix.should.equal("prefix")
attributes.access_log.emit_interval.should.equal(60)
access_log.enabled = False
conn.modify_lb_attribute("my-lb", "AccessLog", access_log)
attributes = lb.get_attributes(force=True)
attributes.access_log.enabled.should.be.false
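# Hedged sketch (an assumption, not a working test) of what the boto3 access-log
# round-trip could look like once moto maps the boto3 attribute names; it is
# deliberately not named test_* so it is never collected, because, as noted above,
# moto currently expects 's3_bucket_name' rather than 'S3BucketName'.
@mock_elb
def _sketch_access_log_attribute_boto3():
    lb_name = str(uuid4())[0:6]
    client = boto3.client("elb", region_name="us-east-1")
    client.create_load_balancer(
        LoadBalancerName=lb_name,
        Listeners=[{"Protocol": "http", "LoadBalancerPort": 80, "InstancePort": 8080}],
        AvailabilityZones=["us-east-1a"],
    )
    client.modify_load_balancer_attributes(
        LoadBalancerName=lb_name,
        LoadBalancerAttributes={
            "AccessLog": {
                "Enabled": True,
                "S3BucketName": "bucket",
                "S3BucketPrefix": "prefix",
                "EmitInterval": 60,
            }
        },
    )
    attributes = client.describe_load_balancer_attributes(LoadBalancerName=lb_name)[
        "LoadBalancerAttributes"
    ]
    attributes.should.have.key("AccessLog")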
# This does not work in Boto3, so we can't write an equivalent test
# Moto always looks for attribute 'idle_timeout', but Boto3 sends 'IdleTimeout'
# We'll need to rewrite this feature completely anyway, to get rid of the boto-objects
@mock_elb_deprecated
def test_connection_settings_attribute():
conn = boto.connect_elb()
ports = [(80, 8080, "http"), (443, 8443, "tcp")]
lb = conn.create_load_balancer("my-lb", [], ports)
connection_settings = ConnectionSettingAttribute(conn)
connection_settings.idle_timeout = 120
conn.modify_lb_attribute("my-lb", "ConnectingSettings", connection_settings)
attributes = lb.get_attributes(force=True)
attributes.connecting_settings.idle_timeout.should.equal(120)
connection_settings.idle_timeout = 60
conn.modify_lb_attribute("my-lb", "ConnectingSettings", connection_settings)
attributes = lb.get_attributes(force=True)
attributes.connecting_settings.idle_timeout.should.equal(60)
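# Hedged sketch with the same caveat as the access-log sketch above: the boto3
# idle-timeout round-trip, kept un-collected because moto currently expects
# 'idle_timeout' rather than 'IdleTimeout'.
@mock_elb
def _sketch_connection_settings_attribute_boto3():
    lb_name = str(uuid4())[0:6]
    client = boto3.client("elb", region_name="us-east-1")
    client.create_load_balancer(
        LoadBalancerName=lb_name,
        Listeners=[{"Protocol": "http", "LoadBalancerPort": 80, "InstancePort": 8080}],
        AvailabilityZones=["us-east-1a"],
    )
    client.modify_load_balancer_attributes(
        LoadBalancerName=lb_name,
        LoadBalancerAttributes={"ConnectionSettings": {"IdleTimeout": 120}},
    )
    attributes = client.describe_load_balancer_attributes(LoadBalancerName=lb_name)[
        "LoadBalancerAttributes"
    ]
    attributes.should.have.key("ConnectionSettings")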
# Has boto3 equivalent
@mock_elb_deprecated
def test_create_lb_cookie_stickiness_policy():
conn = boto.connect_elb()
ports = [(80, 8080, "http"), (443, 8443, "tcp")]
lb = conn.create_load_balancer("my-lb", [], ports)
cookie_expiration_period = 60
policy_name = "LBCookieStickinessPolicy"
lb.create_cookie_stickiness_policy(cookie_expiration_period, policy_name)
lb = conn.get_all_load_balancers()[0]
# There appears to be a quirk about boto, whereby it returns a unicode
# string for cookie_expiration_period, despite being stated in
# documentation to be a long numeric.
#
# To work around that, this value is converted to an int and checked.
cookie_expiration_period_response_str = lb.policies.lb_cookie_stickiness_policies[
0
].cookie_expiration_period
int(cookie_expiration_period_response_str).should.equal(cookie_expiration_period)
lb.policies.lb_cookie_stickiness_policies[0].policy_name.should.equal(policy_name)
@mock_elb
def test_create_lb_cookie_stickiness_policy_boto3():
lb_name = str(uuid4())[0:6]
client = boto3.client("elb", region_name="us-east-1")
client.create_load_balancer(
LoadBalancerName=lb_name,
Listeners=[{"Protocol": "http", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a"],
)
balancer = client.describe_load_balancers(LoadBalancerNames=[lb_name])[
"LoadBalancerDescriptions"
][0]
lbc_policies = balancer["Policies"]["LBCookieStickinessPolicies"]
lbc_policies.should.have.length_of(0)
client.create_lb_cookie_stickiness_policy(
LoadBalancerName=lb_name, PolicyName="pname", CookieExpirationPeriod=42
)
balancer = client.describe_load_balancers(LoadBalancerNames=[lb_name])[
"LoadBalancerDescriptions"
][0]
policies = balancer["Policies"]
lbc_policies = policies["LBCookieStickinessPolicies"]
lbc_policies.should.have.length_of(1)
lbc_policies[0].should.equal({"PolicyName": "pname", "CookieExpirationPeriod": 42})
# Has boto3 equivalent
@mock_elb_deprecated
def test_create_lb_cookie_stickiness_policy_no_expiry():
conn = boto.connect_elb()
ports = [(80, 8080, "http"), (443, 8443, "tcp")]
lb = conn.create_load_balancer("my-lb", [], ports)
policy_name = "LBCookieStickinessPolicy"
lb.create_cookie_stickiness_policy(None, policy_name)
lb = conn.get_all_load_balancers()[0]
lb.policies.lb_cookie_stickiness_policies[0].cookie_expiration_period.should.be.none
lb.policies.lb_cookie_stickiness_policies[0].policy_name.should.equal(policy_name)
@mock_elb
def test_create_lb_cookie_stickiness_policy_no_expiry_boto3():
lb_name = str(uuid4())[0:6]
client = boto3.client("elb", region_name="us-east-1")
client.create_load_balancer(
LoadBalancerName=lb_name,
Listeners=[{"Protocol": "http", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a"],
)
balancer = client.describe_load_balancers(LoadBalancerNames=[lb_name])[
"LoadBalancerDescriptions"
][0]
lbc_policies = balancer["Policies"]["LBCookieStickinessPolicies"]
lbc_policies.should.have.length_of(0)
client.create_lb_cookie_stickiness_policy(
LoadBalancerName=lb_name, PolicyName="pname"
)
balancer = client.describe_load_balancers(LoadBalancerNames=[lb_name])[
"LoadBalancerDescriptions"
][0]
policies = balancer["Policies"]
lbc_policies = policies["LBCookieStickinessPolicies"]
lbc_policies.should.have.length_of(1)
lbc_policies[0].should.equal({"PolicyName": "pname"})
# Has boto3 equivalent
@mock_elb_deprecated
def test_create_app_cookie_stickiness_policy():
conn = boto.connect_elb()
ports = [(80, 8080, "http"), (443, 8443, "tcp")]
lb = conn.create_load_balancer("my-lb", [], ports)
cookie_name = "my-stickiness-policy"
policy_name = "AppCookieStickinessPolicy"
lb.create_app_cookie_stickiness_policy(cookie_name, policy_name)
lb = conn.get_all_load_balancers()[0]
lb.policies.app_cookie_stickiness_policies[0].cookie_name.should.equal(cookie_name)
lb.policies.app_cookie_stickiness_policies[0].policy_name.should.equal(policy_name)
@mock_elb
def test_create_app_cookie_stickiness_policy_boto3():
lb_name = str(uuid4())[0:6]
client = boto3.client("elb", region_name="us-east-1")
client.create_load_balancer(
LoadBalancerName=lb_name,
Listeners=[{"Protocol": "http", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a"],
)
balancer = client.describe_load_balancers(LoadBalancerNames=[lb_name])[
"LoadBalancerDescriptions"
][0]
lbc_policies = balancer["Policies"]["AppCookieStickinessPolicies"]
lbc_policies.should.have.length_of(0)
client.create_app_cookie_stickiness_policy(
LoadBalancerName=lb_name, PolicyName="pname", CookieName="cname"
)
balancer = client.describe_load_balancers(LoadBalancerNames=[lb_name])[
"LoadBalancerDescriptions"
][0]
policies = balancer["Policies"]
lbc_policies = policies["AppCookieStickinessPolicies"]
lbc_policies.should.have.length_of(1)
lbc_policies[0].should.equal({"CookieName": "cname", "PolicyName": "pname"})
# Has boto3 equivalent
@mock_elb_deprecated
def test_create_lb_policy():
conn = boto.connect_elb()
ports = [(80, 8080, "http"), (443, 8443, "tcp")]
lb = conn.create_load_balancer("my-lb", [], ports)
policy_name = "ProxyPolicy"
lb.create_lb_policy(policy_name, "ProxyProtocolPolicyType", {"ProxyProtocol": True})
lb = conn.get_all_load_balancers()[0]
lb.policies.other_policies[0].policy_name.should.equal(policy_name)
@mock_elb
def test_create_lb_policy_boto3():
lb_name = str(uuid4())[0:6]
client = boto3.client("elb", region_name="us-east-1")
client.create_load_balancer(
LoadBalancerName=lb_name,
Listeners=[{"Protocol": "http", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a"],
)
client.create_load_balancer_policy(
LoadBalancerName=lb_name,
PolicyName="ProxyPolicy",
PolicyTypeName="ProxyProtocolPolicyType",
PolicyAttributes=[
{"AttributeName": "ProxyProtocol", "AttributeValue": "true",},
],
)
balancer = client.describe_load_balancers(LoadBalancerNames=[lb_name])[
"LoadBalancerDescriptions"
][0]
policies = balancer["Policies"]
policies.should.have.key("OtherPolicies").equal(["ProxyPolicy"])
# Has boto3 equivalent
@mock_elb_deprecated
def test_set_policies_of_listener():
conn = boto.connect_elb()
ports = [(80, 8080, "http"), (443, 8443, "tcp")]
lb = conn.create_load_balancer("my-lb", [], ports)
listener_port = 80
policy_name = "my-stickiness-policy"
# boto docs currently state that zero or one policy may be associated
# with a given listener
# in a real flow, it is necessary first to create a policy,
# then to set that policy to the listener
lb.create_cookie_stickiness_policy(None, policy_name)
lb.set_policies_of_listener(listener_port, [policy_name])
lb = conn.get_all_load_balancers()[0]
listener = lb.listeners[0]
listener.load_balancer_port.should.equal(listener_port)
# by contrast to a backend, a listener stores only policy name strings
listener.policy_names[0].should.equal(policy_name)
@mock_elb
def test_set_policies_of_listener_boto3():
lb_name = str(uuid4())[0:6]
client = boto3.client("elb", region_name="us-east-1")
client.create_load_balancer(
LoadBalancerName=lb_name,
Listeners=[
{"Protocol": "http", "LoadBalancerPort": 80, "InstancePort": 8080},
{"Protocol": "https", "LoadBalancerPort": 81, "InstancePort": 8081},
],
AvailabilityZones=["us-east-1a"],
)
client.create_app_cookie_stickiness_policy(
LoadBalancerName=lb_name, PolicyName="pname", CookieName="cname"
)
client.set_load_balancer_policies_of_listener(
LoadBalancerName=lb_name, LoadBalancerPort=81, PolicyNames=["pname"]
)
balancer = client.describe_load_balancers(LoadBalancerNames=[lb_name])[
"LoadBalancerDescriptions"
][0]
http_l = [
l
for l in balancer["ListenerDescriptions"]
if l["Listener"]["Protocol"] == "HTTP"
][0]
http_l.should.have.key("PolicyNames").should.equal([])
https_l = [
l
for l in balancer["ListenerDescriptions"]
if l["Listener"]["Protocol"] == "HTTPS"
][0]
https_l.should.have.key("PolicyNames").should.equal(["pname"])
# Has boto3 equivalent
@mock_elb_deprecated
def test_set_policies_of_backend_server():
conn = boto.connect_elb()
ports = [(80, 8080, "http"), (443, 8443, "tcp")]
lb = conn.create_load_balancer("my-lb", [], ports)
instance_port = 8080
policy_name = "ProxyPolicy"
# in a real flow, it is necessary first to create a policy,
# then to set that policy to the backend
lb.create_lb_policy(policy_name, "ProxyProtocolPolicyType", {"ProxyProtocol": True})
lb.set_policies_of_backend_server(instance_port, [policy_name])
lb = conn.get_all_load_balancers()[0]
backend = lb.backends[0]
backend.instance_port.should.equal(instance_port)
# by contrast to a listener, a backend stores OtherPolicy objects
backend.policies[0].policy_name.should.equal(policy_name)
@mock_elb
def test_set_policies_of_backend_server_boto3():
lb_name = str(uuid4())[0:6]
client = boto3.client("elb", region_name="us-east-1")
client.create_load_balancer(
LoadBalancerName=lb_name,
Listeners=[
{"Protocol": "http", "LoadBalancerPort": 80, "InstancePort": 8080},
{"Protocol": "https", "LoadBalancerPort": 81, "InstancePort": 8081},
],
AvailabilityZones=["us-east-1a"],
)
client.create_app_cookie_stickiness_policy(
LoadBalancerName=lb_name, PolicyName="pname", CookieName="cname"
)
client.set_load_balancer_policies_for_backend_server(
LoadBalancerName=lb_name, InstancePort=8081, PolicyNames=["pname"]
)
balancer = client.describe_load_balancers(LoadBalancerNames=[lb_name])[
"LoadBalancerDescriptions"
][0]
balancer.should.have.key("BackendServerDescriptions")
desc = balancer["BackendServerDescriptions"]
desc.should.have.length_of(1)
desc[0].should.equal({"InstancePort": 8081, "PolicyNames": ["pname"]})
# Has boto3 equivalent
@mock_ec2_deprecated
@mock_elb_deprecated
def test_describe_instance_health():
ec2_conn = boto.connect_ec2()
reservation = ec2_conn.run_instances(EXAMPLE_AMI_ID, 2)
instance_id1 = reservation.instances[0].id
instance_id2 = reservation.instances[1].id
conn = boto.connect_elb()
zones = ["us-east-1a", "us-east-1b"]
ports = [(80, 8080, "http"), (443, 8443, "tcp")]
lb = conn.create_load_balancer("my-lb", zones, ports)
instances_health = conn.describe_instance_health("my-lb")
instances_health.should.be.empty
lb.register_instances([instance_id1, instance_id2])
instances_health = conn.describe_instance_health("my-lb")
instances_health.should.have.length_of(2)
for instance_health in instances_health:
instance_health.instance_id.should.be.within([instance_id1, instance_id2])
instance_health.state.should.equal("InService")
instances_health = conn.describe_instance_health("my-lb", [instance_id1])
instances_health.should.have.length_of(1)
instances_health[0].instance_id.should.equal(instance_id1)
instances_health[0].state.should.equal("InService")
@mock_ec2
@mock_elb
def test_describe_instance_health_boto3():
elb = boto3.client("elb", region_name="us-east-1")
ec2 = boto3.client("ec2", region_name="us-east-1")
instances = ec2.run_instances(ImageId=EXAMPLE_AMI_ID, MinCount=2, MaxCount=2)[
"Instances"
]
lb_name = "my_load_balancer"
elb.create_load_balancer(
Listeners=[{"InstancePort": 80, "LoadBalancerPort": 8080, "Protocol": "HTTP"}],
LoadBalancerName=lb_name,
)
elb.register_instances_with_load_balancer(
LoadBalancerName=lb_name, Instances=[{"InstanceId": instances[0]["InstanceId"]}]
)
instances_health = elb.describe_instance_health(LoadBalancerName=lb_name)["InstanceStates"]
self.opd_i = int(self.widgets[self.iname + "_opd_i"].GetValue())
self.inst.pupilopd = (self.inst._datapath + os.sep + "OPD" + os.sep + self.opd_name, self.opd_i) # filename, slice
self.log("Selected OPD is " + str(self.opd_name))
if self.iname + "_coron" in self.widgets:
self.inst.image_mask = self.widgets[self.iname + "_coron"].GetValue()
self.inst.pupil_mask = self.widgets[self.iname + "_pupil"].GetValue()
# TODO read in mis-registration options here.
options['source_offset_r'] = float(self.widgets["source_off_r"].GetValue())
options['source_offset_theta'] = float(self.widgets["source_off_theta"].GetValue())
options['pupil_shift_x'] = float(
self.widgets[self.iname + "_pupilshift_x"].GetValue()) / 100. # convert from percent to fraction
options['pupil_shift_y'] = float(
self.widgets[self.iname + "_pupilshift_y"].GetValue()) / 100. # convert from percent to fraction
self.inst.options = options
def _getFOV(self):
""" Get field of view, either as a scalar number for square or a
2-element ndarray for rectangular
        Note that if it is a 2-element pair, we flip the order of the elements.
This is to facilitate a more intuitive "x,y" ordering for the user interface.
"""
fovstr = self.widgets['FOV'].GetValue()
try:
if ',' in fovstr:
parts = fovstr.split(',')
return np.asarray(parts[0:2], dtype=float)[::-1]
else:
return float(self.widgets['FOV'].GetValue())
except:
_log.error("Invalid entry in FOV field. Please check it and try again")
raise ValueError("Invalid entry in FOV field. Please check it and try again")
def _setFOV(self, newvalue):
""" Get field of view, either as a scalar number for square or a
2-element ndarray for rectangular
Note that if it is a 2-element paid, we flip the order of the elements.
This is to facilitate a more intuitive "x,y" ordering for the user interface.
"""
if hasattr(newvalue, '__iter__'):
newstring = ",".join((str(newvalue[1]), str(newvalue[0])))
else:
newstring = str(newvalue)
self.widgets['FOV'].SetValue(newstring)
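    # Example of the x,y flipping above (illustrative, not from the original
    # code): entering "3.2, 1.6" in the FOV widget makes _getFOV() return
    # array([1.6, 3.2]), and _setFOV([1.6, 3.2]) writes "3.2,1.6" back into
    # the widget.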
# -------------------------------------------------------------------------
# Class to run the actual PSF calculation in a background thread, to keep the
# GUI still responsive
# This code is based on examples at http://wiki.wxpython.org/LongRunningTasks
# and http://www.blog.pythonlibrary.org/2010/05/22/wxpython-and-threads/
class PSFCalcThread(Thread):
def __init__(self):
"""Init Worker Thread Class."""
Thread.__init__(self)
self.start() # start the thread
def runPSFCalc(self, instrument, masterapp):
if _HAS_PYSYNPHOT:
source = poppy.specFromSpectralType(masterapp.sptype)
else:
source = None # generic flat spectrum
print("starting calc in thread")
if instrument.options['fov_in_arcsec']:
fov_arcsec = masterapp.FOV
fov_pixels = None
else:
fov_arcsec = None
fov_pixels = masterapp.FOV
PSF_HDUlist = instrument.calc_psf(source=source,
detector_oversample=masterapp.detector_oversampling,
fft_oversample=masterapp.fft_oversampling,
fov_arcsec=fov_arcsec, fov_pixels=fov_pixels,
nlambda=masterapp.nlambda,
monochromatic=masterapp.monochromatic_wavelength,
display=True)
wx.PostEvent(masterapp, ResultEvent(PSF_HDUlist)) # send results back to master thread
# Define notification event for thread completion
EVT_RESULT_ID = wx.NewId()
def EVT_RESULT(win, func):
"""Define Result Event."""
win.Connect(-1, -1, EVT_RESULT_ID, func)
class ResultEvent(wx.PyEvent):
"""Simple event to carry arbitrary result data."""
def __init__(self, data):
"""Init Result Event."""
wx.PyEvent.__init__(self)
self.SetEventType(EVT_RESULT_ID)
self.data = data
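# Usage sketch (illustrative; the handler name below is hypothetical, only
# EVT_RESULT and ResultEvent above are part of this module): a frame that wants
# to receive the finished PSF would typically do
#
#     EVT_RESULT(self, self.OnCalcDone)          # in the frame's __init__
#
#     def OnCalcDone(self, event):
#         psf_hdulist = event.data               # HDUList posted by the worker
#
# so the long-running calculation never updates the GUI from the worker thread.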
# -------------------------------------------------------------------------
class WebbPSFMenuBar(wx.MenuBar):
def __init__(self, parent):
wx.MenuBar.__init__(self)
item_keys = ['save_psf', 'save_profile', 'documentation', 'preferences', 'calc_options', 'calc_psf',
'display_spectrum', 'display_optics', 'display_opd', 'display_psf', 'display_profiles']
self.ids = {}
for key in item_keys:
self.ids[key] = wx.NewId()
# File menu
filemenu = wx.Menu()
self.SavePSF = filemenu.Append(self.ids['save_psf'], '&Save PSF as...\tCtrl+Shift+S')
self.SaveProfile = filemenu.Append(self.ids['save_profile'], 'Save &profile data as...\tCtrl+Shift+P')
filemenu.AppendSeparator()
self.Preferences = filemenu.Append(self.ids['preferences'], 'Preferences...\tCtrl+,')
filemenu.AppendSeparator()
self.Exit = filemenu.Append(wx.ID_EXIT, 'E&xit', 'Exit this program')
# these start out disabled since no PSF calculated yet:
self.SavePSF.Enable(False)
self.SaveProfile.Enable(False)
# Edit menu
editmenu = wx.Menu()
editmenu.Append(wx.ID_CUT, 'Cut\tCtrl-X')
editmenu.Append(wx.ID_COPY, 'Copy\tCtrl-C')
editmenu.Append(wx.ID_PASTE, 'Paste\tCtrl-V')
# Calculation Menu
calcmenu = wx.Menu()
calcmenu.Append(self.ids['calc_psf'], 'Compute PSF')
self.Preferences = calcmenu.Append(self.ids['calc_options'], 'More Options...')
calcmenu.AppendSeparator()
calcmenu.Append(self.ids['display_spectrum'], 'Display Spectrum')
calcmenu.Append(self.ids['display_optics'], 'Display Optics')
calcmenu.Append(self.ids['display_opd'], 'Display OPD')
calcmenu.Append(self.ids['display_psf'], 'Display PSF')
calcmenu.Append(self.ids['display_profiles'], 'Display PSF Profiles')
# Help Menu
helpmenu = wx.Menu()
self.Docs = helpmenu.Append(self.ids['documentation'], 'WebbPSF Documentation\tCtrl+d')
self.About = helpmenu.Append(wx.ID_ABOUT, '&About WebbPSF', 'About this program')
self.Append(filemenu, '&File')
self.Append(editmenu, '&Edit')
self.Append(calcmenu, '&Calculation')
self.Append(helpmenu, '&Help')
# -------------------------------------------------------------------------
class WebbPSFDialog(wx.Dialog):
""" Generic dialog box for WebbPSF
TODO: investigate wx.Validator to validate the text input fields
"""
def __init__(self, parent=None, id=-1, title="Dialog", **kwargs):
wx.Dialog.__init__(self, parent, id=id, title=title, **kwargs)
self.parent = parent
self.results = None # in case we cancel this gets returned
self.widgets = {}
self.values = {}
# self._createWidgets()
def _add_labeled_dropdown(self, name, parent, parentsizer, label="Entry:", choices=None, default=0, width=5, position=(0, 0),
columnspan=1, **kwargs):
"""convenient wrapper for adding a Combobox
columnspan sets the span for the combobox itself
"""
mylabel = wx.StaticText(parent, -1, label=label)
parentsizer.Add(mylabel, position, (1, 1), wx.EXPAND)
if choices is None:
try:
choices = self.values[name]
except:
choices = ['Option A', 'Option B']
if isinstance(default, int):
value = choices[default]
else:
value = default
mycombo = wx.ComboBox(parent, -1, value=value, choices=choices, style=wx.CB_DROPDOWN | wx.CB_READONLY)
parentsizer.Add(mycombo, (position[0], position[1] + 1), (1, columnspan), wx.EXPAND)
self.widgets[name] = mycombo
def _add_labeled_entry(self, name, parent, parentsizer, label="Entry:", value=None, format="%.2g",
width=5, position=(0, 0), postlabel=None, **kwargs):
"convenient wrapper for adding an Entry"
mylabel = wx.StaticText(parent, -1, label=label)
parentsizer.Add(mylabel, position, (1, 1), wx.EXPAND)
if value is None:
try:
value = format % self.input_options[name]
except:
value = ""
else:
try:
value = format % value
except:
pass
mytext = wx.TextCtrl(parent, -1, value=value)
parentsizer.Add(mytext, (position[0], position[1] + 1), (1, 1), wx.EXPAND)
self.widgets[name] = mytext
if postlabel is not None:
mylabel2 = wx.StaticText(parent, -1, label=postlabel)
parentsizer.Add(mylabel2, (position[0], position[1] + 2), (1, 1), wx.EXPAND)
def _createWidgets(self):
pass
# subclass me
def OnButtonOK(self, event):
pass
class WebbPSFOptionsDialog(WebbPSFDialog):
""" Dialog box for WebbPSF options
TODO: investigate wx.Validator to validate the text input fields
"""
def __init__(self, parent=None, id=-1, title="WebbPSF Options",
input_options=_default_options()):
WebbPSFDialog.__init__(self, parent, id=id, title=title)
self.input_options = input_options
colortables = [
('Jet (blue to red)', matplotlib.cm.jet),
('Gray', matplotlib.cm.gray),
('Heat (black-red-yellow)', matplotlib.cm.gist_heat),
('Copper (black to tan)', matplotlib.cm.copper),
('Stern', matplotlib.cm.gist_stern),
('Prism (repeating rainbow)', matplotlib.cm.prism)]
try:
import collections
self.colortables = collections.OrderedDict(colortables)
except:
self.colortables = dict(colortables)
self.values['force_coron'] = ['regular propagation (MFT)', 'full coronagraphic propagation (FFT/SAM)']
self.values['no_sam'] = ['semi-analytic method if possible', 'basic FFT method always']
self.values['monochromatic'] = ['Broadband', 'Monochromatic']
self.values['parallelization'] = ['Sequential', 'Parallelized']
self.values['fov_in_arcsec'] = ['Arcseconds', 'Pixels']
self._createWidgets()
def _createWidgets(self):
topSizer = wx.BoxSizer(wx.VERTICAL)
topPanel = wx.Panel(self)
topPanelSizer = wx.FlexGridSizer(rows=3, cols=1, hgap=5, vgap=10)
# topPanelSizer = wx.BoxSizer(wx.VERTICAL)
panel1 = wx.Panel(topPanel, style=wx.SIMPLE_BORDER | wx.EXPAND)
sizer = wx.GridBagSizer()
txt = wx.StaticText(panel1, -1, label="Propagation Calculation Options")
sizer.Add(txt, (0, 0), (1, 3), wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL)
r = 1
self._add_labeled_dropdown("monochromatic", panel1, sizer,
label=' Broadband or monochromatic? ',
default=1 if self.input_options['monochromatic'] else 0, position=(r, 0))
r += 1
self._add_labeled_dropdown("parallelization", panel1, sizer,
label=' Parallelize Wavelengths? ',
default=1 if self.input_options['parallelization'] else 0, position=(r, 0))
r += 1
self._add_labeled_dropdown("force_coron", panel1, sizer,
label=' Direct imaging calculations use: ',
default=1 if self.input_options['force_coron'] else 0, position=(r, 0))
r += 1
self._add_labeled_dropdown("no_sam", panel1, sizer,
label=' Coronagraphic calculations use',
default=1 if self.input_options['no_sam'] else 0, position=(r, 0))
r += 1
self._add_labeled_dropdown("parity", panel1, sizer,
label=' Output pixel grid parity is',
choices=['odd', 'even', 'either'], default=self.input_options['parity'], position=(r, 0))
r += 1
self._add_labeled_dropdown("fov_in_arcsec", panel1, sizer,
label=' Specify field of view in: ',
default=0 if self.input_options['fov_in_arcsec'] else 1, position=(r, 0))
# sizer.AddGrowableCol(0)
panel1.SetSizerAndFit(sizer)
panel2 = wx.Panel(topPanel, style=wx.SIMPLE_BORDER | wx.EXPAND)
sizer = wx.GridBagSizer()
r = 0
txt = wx.StaticText(panel2, -1, label="PSF Display Options")
sizer.Add(txt, (r, 0), (1, 3), wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL)
r += 1
self._add_labeled_dropdown("psf_scale", panel2, sizer, label=' Display scale:', choices=['log', 'linear'],
default=self.input_options['psf_scale'], position=(r, 0))
r += 1
self._add_labeled_entry("psf_vmin", panel2, sizer, label=' Min scale value:',
format="%.2g", width=7,
position=(r, 0))
r += 1
self._add_labeled_entry("psf_vmax", panel2, sizer, label=' Max scale value:',
format="%.2g", width=7,
position=(r, 0))
r += 1
self._add_labeled_dropdown("psf_normalize", panel2, sizer,
label=' Normalize PSF to:', choices=['Total', 'Peak'],
default=self.input_options['psf_normalize'], position=(r, 0))
r += 1
self._add_labeled_dropdown("psf_cmap", panel2, sizer, label=' Color table:',
choices=[a for a in self.colortables],
default=self.input_options['psf_cmap_str'],
position=(r, 0))
panel2.SetSizerAndFit(sizer)
bbar = self.CreateStdDialogButtonSizer(wx.OK | wx.CANCEL)
self.Bind(wx.EVT_BUTTON, self.OnButtonOK, id=wx.ID_OK)
# self.Bind(wx.EVT_BUTTON, self.OnButtonCancel, id=wx.ID_CANCEL)
topPanelSizer.Add(panel1, 1, wx.EXPAND)
topPanelSizer.Add(panel2, 1, wx.EXPAND)
# topPanelSizer.Add(panel3,1, wx.EXPAND)
topPanelSizer.Add(bbar, 1, wx.EXPAND)
# topPanel.AddGrowableCol(0)
topPanel.SetSizerAndFit(topPanelSizer)
topSizer.Add(topPanel, 1, flag=wx.EXPAND | wx.ALL, border=10)
self.SetSizerAndFit(topSizer)
self.Show(True)
# def OnButtonCancel(self, event):
# print("User pressed Cancel")
# self.Close()
# self.Destroy()
def OnButtonOK(self, event):
print("User pressed OK")
try:
results = {}
results['force_coron'] = self.widgets['force_coron'].GetValue() == 'full coronagraphic propagation (FFT/SAM)'
results['no_sam'] = self.widgets['no_sam'].GetValue() == 'basic FFT method always'
results['monochromatic'] = self.widgets['monochromatic'].GetValue() == 'Monochromatic'
results['parallelization'] = self.widgets['parallelization'].GetValue() == 'Parallelized'
results['fov_in_arcsec'] = self.widgets['fov_in_arcsec'].GetValue() == 'Arcseconds'
results['parity'] = self.widgets['parity'].GetValue()
results['psf_scale'] = self.widgets['psf_scale'].GetValue()
results['psf_vmax'] = float(self.widgets['psf_vmax'].GetValue())
results['psf_vmin'] = float(self.widgets['psf_vmin'].GetValue())
results['psf_cmap_str'] = self.widgets['psf_cmap'].GetValue()
results['psf_cmap'] = self.colortables[self.widgets['psf_cmap'].GetValue()]
results['psf_normalize'] = self.widgets['psf_normalize'].GetValue()
print(results)
self.results = results # for access from calling routine
self.Close()
# self.Destroy() # return... If called as a modal dialog, should ShowModal and Destroy from calling routine?
except:
_log.error("Invalid entries in one or more fields. Please check values and re-enter!")
# -------------------------------------------------------------------------
class WebbPSFPreferencesDialog(WebbPSFDialog):
""" Dialog box for WebbPSF options
TODO: investigate wx.Validator to validate the text input fields
"""
def __init__(self, parent=None, id=-1, title="WebbPSF Preferences"):
WebbPSFDialog.__init__(self, parent, id=id, title=title, size=(800, 400))
self._createWidgets()
def _createWidgets(self):
topSizer = wx.BoxSizer(wx.VERTICAL)
| |
the obvious (and
default) choice is 0.5.
:param tuner: Which tuner to use for balancing?
:raises RuntimeError: Lock is not on line.
"""
status = await self.get_lock_status()
if not status == LockStatus.ON_LINE:
raise RuntimeError("Lock is {}. Refusing to balance.".format(status))
imbalance = await self._lockbox.get() - equilibrium
LOGGER.debug("Imbalance is %s of %s", imbalance, cs.LOCKBOX_ALLOWABLE_IMBALANCE)
if abs(imbalance) <= cs.LOCKBOX_ALLOWABLE_IMBALANCE:
LOGGER.debug("No need to balance lock.")
return
# We need to manually tune the distance that is currently maintained by
# the lockbox output.
distance = imbalance * self._lockbox.scale
LOGGER.info("Balancing lock by %s units.", distance)
await self.tune(SpecMhz(cs.LOCK_SFG_FACTOR * distance), tuner)
async def doppler_sweep(self) -> Optional[cs.DopplerLine]:
"""Do one scan and see if there's a doppler line nearby.
:returns: Distance to doppler line and its depth if there is a line,
None otherwise.
"""
signal = await self.acquire_signal()
try:
return signals.locate_doppler_line(signal.transpose())
except ValueError:
LOGGER.debug("Didn't find a line.")
return None
async def doppler_search(
self, tuner: Tuner,
judge: Callable[[cs.DopplerLine], Union[Awaitable[bool], bool]] = lambda _: True,
step_size: SpecMhz = cs.PRELOCK_STEP_SIZE,
max_range: LaserMhz = cs.PRELOCK_MAX_RANGE) -> cs.DopplerLine:
"""Search for a doppler-broadened line around current working point.
        :param tuner: The tuner to use for stepping away from the initial
            working point during the search.
:param judge: A coroutine function that is able to say if the line it
got passed is the line we're searching for. If this method
doesn't evaluate True, we keep searching as if no line was
found. This must not detune the system or if it does, undo
what it has done afterwards.
:param step_size: When searching, spectral sample points will be spaced
this far from each other.
:param max_range: Don't deviate further than ~ LaserMhz from initial
working point. Useful in avoiding to graze through
multiple mode hops.
:returns: The distance of the found line from the last active search
position in MHz.
:raises ValueError: The inputs don't make sense.
:raises SnowblindError: Didn't find a line. Staying at last active
search position. TODO: Go back where we came from.
"""
red, blue = await tuner.get_max_jumps()
reach = (max_range if max_range
else cs.LOCK_SFG_FACTOR * min(max_range, max(red, blue)))
LOGGER.debug("red: %s, blue: %s, reach: %s", red, blue, reach)
if not red > step_size and not blue > step_size:
raise ValueError("Not enough tuning range for that step size in "
"either direction.")
if not red > step_size or not blue > step_size:
LOGGER.warning("Edge of tuning range. Can only search in one direction.")
if max_range:
if max_range < step_size:
raise ValueError("Choose bigger range for this step size.")
if max_range > red or max_range > blue:
LOGGER.warning("Can't search requested range in both directions.")
# Zig-zag back and forth, gradually extending the distance to the
# origin.
alternate = True # search zig-zag
relative_position = SpecMhz(0) # distance to origin
sign = +1 # zig or zag?
counter = 1 # how far to jump with next zig resp. zag
dip = await self.doppler_sweep() # type: DopplerLine
step = step_size
old_tuner_state = await tuner.get()
while True:
if is_shaky():
LOGGER.warning("Aborting doppler search, as system is shaky.")
break
try:
LOGGER.debug("Target is %s + %s = %s",
relative_position, step, relative_position + step)
if abs(relative_position + step) > reach:
LOGGER.debug("Would exceed reach.")
raise ValueError
LOGGER.debug("Is in reach. Tuning by %s.", step)
await self.tune(step, tuner) # raises TuningRangeError!
relative_position = SpecMhz(relative_position + step)
except (ValueError, TuningRangeError):
LOGGER.debug("Couldn't tune.")
if alternate:
# We hit a boundary in one direction. If we still didn't
# find a line, we'll now search in the remaining direction
# as far as possible.
LOGGER.info("Switching to single-sided mode.")
alternate = False
step = SpecMhz(-1 * sign * step_size)
continue
else:
                    # Even the single-sided search didn't turn anything up.
LOGGER.warning("Exiting single-sided mode. No match at all.")
break
LOGGER.info("Searching at %s.", relative_position)
dip = await self.doppler_sweep()
if alternate:
counter += 1
sign *= -1
step = SpecMhz(sign * counter * step_size)
if isinstance(dip, DopplerLine):
is_legal = await tools.async_call(judge, dip)
LOGGER.info("Found %s, %s deep dip %s MHz from the starting position.",
'correct' if is_legal else 'incorrect',
dip.depth,
dip.distance + relative_position)
if is_legal:
return dip
await tuner.set(old_tuner_state)
raise SnowblindError("Didn't find a doppler dip.")
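    # Illustrative note on the search pattern above (derived from the code, not
    # stated in it): with step_size s, the sampled positions relative to the
    # origin are roughly +s, -s, +2s, -2s, +3s, ... until `reach` is exceeded
    # on one side, after which the search continues single-sided on the other.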
async def engage_and_maintain(self) -> None:
"""Engage the lock and maintain it long-term.
:raises LockError: The initial locking failed.
"""
balancer = None # type: asyncio.Task
def launch_balancer() -> None: # We will pass this as callback.
"""Engage the balancer task in the background."""
nonlocal balancer
balancer = self._loop.create_task(self.start_balancer())
LOGGER.info("LockBuddy._lock")
await self._lock()
LOGGER.info("LockBuddy._lock done")
try:
launch_balancer()
# The balancer may fail during relock operations and thus needs to
# be "paused" during those. This coroutine will only ever complete
# if something goes wrong or is cancelled.
await self.start_relocker(on_lock_lost=balancer.cancel,
on_lock_on=launch_balancer)
balancer.cancel() # In case the relocker fails.
except asyncio.CancelledError:
LOGGER.info("Lock maintenance was cancelled.")
balancer.cancel()
raise
async def get_lock_status(self) -> LockStatus:
"""What status is the lock currently in?
:returns: The current lock status.
:raises ConnectionError: Couldn't determine lockbox on/off state.
"""
state = await tools.safe_async_call(self._locked)
if state == LockboxState.DISENGAGED:
return LockStatus.OFF
if state == LockboxState.DEGRADED:
return LockStatus.DEGRADED
if state == LockboxState.ENGAGED:
level = await self._lockbox.get()
if level < cs.LOCKBOX_RAIL_ZONE / 2 or level > 1 - (cs.LOCKBOX_RAIL_ZONE / 2):
return LockStatus.RAIL
return LockStatus.ON_LINE
raise ConnectionError("Couldn't get lockbox state from callback.")
async def is_correct_line(self, tuner: Tuner, hint: DopplerLine = None,
reset: bool = False) -> bool:
"""Are we close to the right line?
:raises SnowblindError: We are not close to any line.
:raises DriftError: Unable to center the dip well enough for
measurement. We're possibly experiencing heavy drifts.
"""
dip = hint if hint else await self.doppler_sweep()
if not dip:
raise SnowblindError("There is no line nearby.")
state_before = await tuner.get()
try:
for attempt in range(cs.PRELOCK_TUNING_ATTEMPTS):
if abs(dip.distance) < cs.PRELOCK_TUNING_PRECISION:
LOGGER.info("Took %s attempts to center dip.", attempt)
break
await self.tune(dip.distance, tuner)
dip = await self.doppler_sweep()
else:
raise DriftError("Unable to center doppler line.")
finally:
if reset:
await tuner.set(state_before)
return dip.depth < cs.PRELOCK_DIP_DECIDING_DEPTH
async def start_balancer(self) -> None:
"""Watch a running lock and correct for occurring drifts."""
status = await self.get_lock_status()
if status != LockStatus.ON_LINE:
raise RuntimeError("Lock is {}. Won't invoke balancer.".format(status))
while True:
if is_shaky():
LOGGER.debug("Suppressing primary balancer, as system is shaky.")
else:
status = await self.get_lock_status()
if status == LockStatus.ON_LINE:
await self.balance(Ts.MO, equilibrium=cs.LOCKBOX_BALANCE_POINT)
else:
break
await asyncio.sleep(cs.LOCKBOX_BALANCE_INTERVAL)
LOGGER.warning("Lock balancer cancelled itself, as lock is %s.", status)
async def start_relocker(
self, on_lock_lost: Callable[[], Any] = lambda: None,
on_lock_on: Callable[[], Any] = lambda: None) -> None:
"""Supervise a running lock and relock whenever it gets lost.
:param on_lock_lost: Will be called as soon as a lock rail event is
registered.
:param on_lock_on: Will be called when the relock process was completed
after a lock loss.
:raises RuntimeError: No sound lock to start on.
"""
status = await self.get_lock_status()
if status != LockStatus.ON_LINE:
raise RuntimeError("Lock is {}. Can't invoke relocker.".format(status))
async def relock() -> None:
await self._unlock()
await self._lock()
while True:
problem = await self.watchdog()
# This is where the loop can fail. At this point, we assume the
# lock to be on line. As watchdog() will refuse to engage on a
# railed lock (raises), this is how we get out if the lock couldn't
# be recovered through re-locking.
if is_shaky():
# The lock was lost in a shaky system. Kill the relocker now
# to avoid skidding onto a wrong HFS line.
LOGGER.debug("Killing relocker, as the system is shaky.")
return
if problem == LockStatus.RAIL:
LOGGER.info("Lock was lost. Relocking.")
await tools.safe_async_call(on_lock_lost)
# If this task gets cancelled during a relock attempt, make
# sure that we end up with a locked system:
await asyncio.shield(relock())
await tools.safe_async_call(on_lock_on)
else:
break
LOGGER.warning("Relocker is exiting due to Lock being %s.", problem)
async def tune(self, distance: SpecMhz, tuner: Tuner) -> None:
"""Simplified tuning when using a specific tuner.
:raises TuningRangeError: Can't get that far using this tuner.
"""
delta = LaserMhz(distance / cs.LOCK_SFG_FACTOR)
if not abs(delta) >= abs(tuner.granularity * | |
<filename>CV/Effective Transformer-based Solution for RSNA Intracranial Hemorrhage Detection/easymia/model/brain_intracranial_hemorrhage_clas/swin_transformers_update.py
# -*- coding: utf-8 -*-
##########################################################################
#
# Copyright (c) 2022 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################################################################
"""
Implement Transformer Class for Swin Transformer
"""
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from .droppath import DropPath
class Identity(nn.Layer):
""" Identity layer
The output of this layer is the input without any change.
Use this layer to avoid if condition in some forward methods
"""
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
"""
forward
"""
return x
class PatchEmbedding(nn.Layer):
"""Patch Embeddings
    Apply patch embedding to input images. The embedding is implemented using a Conv2D op.
Attributes:
image_size: int, input image size, default: 224
patch_size: int, size of patch, default: 4
in_channels: int, input image channels, default: 3
embed_dim: int, embedding dimension, default: 96
"""
def __init__(self, image_size=224, patch_size=4, in_channels=3, embed_dim=96):
super().__init__()
image_size = (image_size, image_size) # TODO: add to_2tuple
patch_size = (patch_size, patch_size)
patches_resolution = [image_size[0]//patch_size[0], image_size[1]//patch_size[1]]
self.image_size = image_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_channels = in_channels
self.embed_dim = embed_dim
self.patch_embed = nn.Conv2D(in_channels=in_channels,
out_channels=embed_dim,
kernel_size=patch_size,
stride=patch_size)
w_attr, b_attr = self._init_weights_layernorm()
self.norm = nn.LayerNorm(embed_dim,
weight_attr=w_attr,
bias_attr=b_attr)
def _init_weights_layernorm(self):
"""
        LayerNorm weight initialization
"""
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(1))
bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0))
return weight_attr, bias_attr
def forward(self, x):
"""
forward
"""
x = self.patch_embed(x) # [batch, embed_dim, h, w] h,w = patch_resolution
x = x.flatten(start_axis=2, stop_axis=-1) # [batch, embed_dim, h*w] h*w = num_patches
x = x.transpose([0, 2, 1]) # [batch, h*w, embed_dim]
x = self.norm(x) # [batch, num_patches, embed_dim]
return x
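# Shape example (illustrative, using the defaults above): a [batch, 3, 224, 224]
# input with patch_size=4 gives patches_resolution = [56, 56], so PatchEmbedding
# returns a tensor of shape [batch, 56 * 56, 96] = [batch, 3136, 96].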
class PatchMerging(nn.Layer):
""" Patch Merging class
    Merge multiple patches into one patch and keep the output dim.
    Specifically, merge adjacent 2x2 patches (dim=C) into 1 patch.
The concat dim 4*C is rescaled to 2*C
Attributes:
input_resolution: tuple of ints, the size of input
dim: dimension of single patch
reduction: nn.Linear which maps 4C to 2C dim
norm: nn.LayerNorm, applied after linear layer.
"""
def __init__(self, input_resolution, dim):
super(PatchMerging, self).__init__()
self.input_resolution = input_resolution
self.dim = dim
w_attr_1, b_attr_1 = self._init_weights()
self.reduction = nn.Linear(4 * dim,
2 * dim,
weight_attr=w_attr_1,
bias_attr=False)
w_attr_2, b_attr_2 = self._init_weights_layernorm()
self.norm = nn.LayerNorm(4 * dim,
weight_attr=w_attr_2,
bias_attr=b_attr_2)
def _init_weights_layernorm(self):
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(1))
bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0))
return weight_attr, bias_attr
def _init_weights(self):
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.TruncatedNormal(std=.02))
bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0))
return weight_attr, bias_attr
def forward(self, x):
"""
forward
"""
h, w = self.input_resolution
b, _, c = x.shape
x = x.reshape([b, h, w, c])
        x0 = x[:, 0::2, 0::2, :]  # [B, H/2, W/2, C]
        x1 = x[:, 1::2, 0::2, :]  # [B, H/2, W/2, C]
        x2 = x[:, 0::2, 1::2, :]  # [B, H/2, W/2, C]
        x3 = x[:, 1::2, 1::2, :]  # [B, H/2, W/2, C]
x = paddle.concat([x0, x1, x2, x3], -1) #[B, H/2, W/2, 4*C]
x = x.reshape([b, -1, 4 * c]) # [B, H/2*W/2, 4*C]
x = self.norm(x)
x = self.reduction(x)
return x
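# Shape example (illustrative): with input_resolution=(56, 56) and dim=96, an
# input of shape [batch, 3136, 96] is regrouped into the four 2x2 sub-grids
# x0..x3, concatenated to [batch, 28 * 28, 4 * 96], and finally reduced to
# [batch, 784, 192] -- four times fewer tokens with twice the channels.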
class Mlp(nn.Layer):
""" MLP module
Impl using nn.Linear and activation is GELU, dropout is applied.
Ops: fc -> act -> dropout -> fc -> dropout
Attributes:
fc1: nn.Linear
fc2: nn.Linear
act: GELU
dropout1: dropout after fc1
dropout2: dropout after fc2
"""
def __init__(self, in_features, hidden_features, dropout):
super(Mlp, self).__init__()
w_attr_1, b_attr_1 = self._init_weights()
self.fc1 = nn.Linear(in_features,
hidden_features,
weight_attr=w_attr_1,
bias_attr=b_attr_1)
w_attr_2, b_attr_2 = self._init_weights()
self.fc2 = nn.Linear(hidden_features,
in_features,
weight_attr=w_attr_2,
bias_attr=b_attr_2)
self.act = nn.GELU()
self.dropout = nn.Dropout(dropout)
def _init_weights(self):
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.TruncatedNormal(std=.02))
bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0))
return weight_attr, bias_attr
def forward(self, x):
"""
forward
"""
x = self.fc1(x)
x = self.act(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.dropout(x)
return x
class WindowAttention(nn.Layer):
"""Window based multihead attention, with relative position bias.
Both shifted window and non-shifted window are supported.
Attributes:
dim: int, input dimension (channels)
window_size: int, height and width of the window
num_heads: int, number of attention heads
qkv_bias: bool, if True, enable learnable bias to q,k,v, default: True
qk_scale: float, override default qk scale head_dim**-0.5 if set, default: None
attention_dropout: float, dropout of attention
dropout: float, dropout for output
"""
def __init__(self,
dim,
window_size,
num_heads,
qkv_bias=True,
qk_scale=None,
attention_dropout=0.,
dropout=0.):
super(WindowAttention, self).__init__()
self.window_size = window_size
self.num_heads = num_heads
self.dim = dim
self.dim_head = dim // num_heads
self.scale = qk_scale or self.dim_head ** -0.5
self.relative_position_bias_table = paddle.create_parameter(
shape=[(2 * window_size[0] -1) * (2 * window_size[1] - 1), num_heads],
dtype='float32',
default_initializer=paddle.nn.initializer.TruncatedNormal(std=.02))
# relative position index for each token inside window
coords_h = paddle.arange(self.window_size[0])
coords_w = paddle.arange(self.window_size[1])
coords = paddle.stack(paddle.meshgrid([coords_h, coords_w])) # [2, window_h, window_w]
coords_flatten = paddle.flatten(coords, 1) # [2, window_h * window_w]
        # 2, window_h * window_w, window_h * window_w
relative_coords = coords_flatten.unsqueeze(2) - coords_flatten.unsqueeze(1)
        # window_h*window_w, window_h*window_w, 2
relative_coords = relative_coords.transpose([1, 2, 0])
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
# [window_size * window_size, window_size*window_size]
relative_position_index = relative_coords.sum(-1)
self.register_buffer("relative_position_index", relative_position_index)
w_attr_1, b_attr_1 = self._init_weights()
self.qkv = nn.Linear(dim,
dim * 3,
weight_attr=w_attr_1,
bias_attr=b_attr_1 if qkv_bias else False)
self.attn_dropout = nn.Dropout(attention_dropout)
w_attr_2, b_attr_2 = self._init_weights()
self.proj = nn.Linear(dim,
dim,
weight_attr=w_attr_2,
bias_attr=b_attr_2)
self.proj_dropout = nn.Dropout(dropout)
self.softmax = nn.Softmax(axis=-1)
def _init_weights(self):
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.TruncatedNormal(std=.02))
bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0))
return weight_attr, bias_attr
def transpose_multihead(self, x):
"""
        multi-head matrix transpose
"""
new_shape = x.shape[:-1] + [self.num_heads, self.dim_head]
x = x.reshape(new_shape)
x = x.transpose([0, 2, 1, 3])
return x
def get_relative_pos_bias_from_pos_index(self):
"""
# relative_position_bias_table is a ParamBase object
# https://github.com/PaddlePaddle/Paddle/blob/067f558c59b34dd6d8626aad73e9943cf7f5960f/python/paddle/fluid/framework.py#L5727
"""
table = self.relative_position_bias_table # N x num_heads
# index is a tensor
index = self.relative_position_index.reshape([-1]) # window_h*window_w * window_h*window_w
# NOTE: paddle does NOT support indexing Tensor by a Tensor
relative_position_bias = paddle.index_select(x=table, index=index)
return relative_position_bias
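    # Worked example (illustrative): for a 2x2 window, the per-axis relative
    # coordinates lie in [-1, 1]; after the shift and scaling in __init__,
    # relative_position_index has shape [4, 4] with values in [0, 8], i.e.
    # indices into the (2*2-1) * (2*2-1) = 9-row relative_position_bias_table.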
def forward(self, x, mask=None):
"""
forward
"""
qkv = self.qkv(x).chunk(3, axis=-1)
q, k, v = map(self.transpose_multihead, qkv)
q = q * self.scale
attn = paddle.matmul(q, k, transpose_y=True)
relative_position_bias = self.get_relative_pos_bias_from_pos_index()
relative_position_bias = relative_position_bias.reshape(
[self.window_size[0] * self.window_size[1],
self.window_size[0] * self.window_size[1],
-1])
# nH, window_h*window_w, window_h*window_w
relative_position_bias = relative_position_bias.transpose([2, 0, 1])
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.reshape(
[x.shape[0] // nW, nW, self.num_heads, x.shape[1], x.shape[1]])
attn += mask.unsqueeze(1).unsqueeze(0)
attn = attn.reshape([-1, self.num_heads, x.shape[1], x.shape[1]])
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_dropout(attn)
z = paddle.matmul(attn, v)
z = z.transpose([0, 2, 1, 3])
new_shape = z.shape[:-2] + [self.dim]
z = z.reshape(new_shape)
z = self.proj(z)
z = self.proj_dropout(z)
return z
def windows_partition(x, window_size):
""" partite windows into window_size x window_size
Args:
x: Tensor, shape=[b, h, w, c]
window_size: int, window size
Returns:
x: Tensor, shape=[num_windows*b, window_size, window_size, c]
"""
B, H, W, C = x.shape
x = x.reshape([B, H // window_size, window_size, W // window_size, window_size, C])
x = x.transpose([0, 1, 3, 2, 4, 5])
x = x.reshape([-1, window_size, window_size, C]) #(num_windows*B, window_size, window_size, C)
return x
def windows_reverse(windows, window_size, H, W):
""" Window reverse
Args:
windows: (n_windows * B, window_size, window_size, C)
window_size: (int) window size
H: (int) height of image
W: (int) width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.reshape([B, H // window_size, W // window_size, window_size, window_size, -1])
x = x.transpose([0, 1, 3, 2, 4, 5])
x = x.reshape([B, H, W, -1])
return x
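# Round-trip example (illustrative): for x of shape [B, 56, 56, C] and
# window_size=7, windows_partition(x, 7) returns [B*64, 7, 7, C] (an 8x8 grid of
# windows), and windows_reverse(windows, 7, 56, 56) restores [B, 56, 56, C].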
class SwinTransformerBlock(nn.Layer):
"""Swin transformer block
Contains window multi head self attention, droppath, mlp, norm and residual.
Attributes:
dim: int, input dimension (channels)
input_resolution: int, input resoultion
num_heads: int, number of attention heads
windos_size: int, window size, default: 7
shift_size: int, shift size for SW-MSA, default: 0
mlp_ratio: float, ratio of mlp hidden dim and input embedding dim, default: 4.
qkv_bias: | |
value
return
row = self.append(value)
for i, key in enumerate(self.key_on):
row[key] = item[i]
def normalize_row(self, row):
if row is None:
values = [None for i in range(len(self.row_columns))]
elif isinstance(row, (dict, SeabornRow)):
values = [row.get(k, None) for k in self.row_columns]
elif not isinstance(row, list):
values = [getattr(row, k, None) for k in self.row_columns]
else:
values = (row + [None for i in range(len(row),
len(self.row_columns))])
return SeabornRow(self.row_columns, values)
@classmethod
def str_to_obj(cls, text, deliminator='|'):
text = text.strip().split('\n')
list_of_list = [[cell.strip() for cell in row.split(deliminator)]
for row in text]
if list_of_list[0][0] == '' and list_of_list[0][-1] == '':
list_of_list = [row[1:-1] for row in list_of_list]
return cls(list_of_list)
@property
def parameters(self):
return self._parameters
@staticmethod
def safe_str(cell): # todo reconcile this with excel version of safe_str
if cell is None:
return ''
if isinstance(cell, basestring):
if cell.replace('.','').isdigit() or '"' in cell or cell in ['False','True']:
cell = '"%s"'%cell
return str(cell)
def obj_to_mark_down(self, title_columns=True):
"""
This will return a str of a mark down text
:param title_columns: bool if True will title all headers
:return: str
"""
md = [[self._title_column(cell) if title_columns else str(cell) for cell in self.columns]]
md += [[self.safe_str(row[col]) for col in self.columns] for row in self.table]
widths = []
for col in range(len(md[0])):
width = max([len(row[col])+1 for row in md])
widths.append(min(300,width))
md.insert(1, [":"+'-' * (width-1) for width in widths])
md = ['| '.join([row[c].ljust(widths[c])
for c in range(len(row))]) for row in md]
return '| ' + ' |\n| '.join(md) + ' |'
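    # Usage sketch (illustrative; the column names are made up): a table with
    # columns ['name', 'qty'] and one row ['bolt', 3] renders roughly as
    #
    #     | Name | Qty |
    #     | :--- | :-- |
    #     | bolt | 3   |
    #
    # with every cell left-justified to the widest entry in its column.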
@classmethod
def objs_to_mark_down(cls, tables, keys=None, pretty_columns=True):
"""
:param tables: dict of {str <name>:SeabornTable}
        :param keys: list of str of the order of keys to use
        :param pretty_columns: bool if True will title the column headers
:return: str of the converted markdown tables
"""
keys = keys or tables.keys()
        ret = ['#### ' + key + '\n' + tables[key].obj_to_mark_down(title_columns=pretty_columns) for key in keys]
return '\n\n'.join(ret)
def obj_to_csv(self, quote_everything=False, space_columns=True):
"""
This will return a str of a csv text that is friendly to excel
        :param quote_everything: bool if True will quote everything whether it
            needs it or not
:param space_columns: bool if True it will align columns with spaces
:return: str
"""
csv = [[self.excel_cell(cell, quote_everything)
for cell in self.columns]]
csv += [[self.excel_cell(row[col], quote_everything)
for col in self.columns] for row in self.table]
if space_columns:
widths = []
def len_(obj, max_width=300):
ret = [len(o) for o in safe_str(obj).split('\r')]
return min(max_width, max(ret))
for col in range(len(csv[0])):
widths.append(max([len_(row[col]) for row in csv]))
csv = [','.join([row[c].ljust(widths[c])
for c in range(len(row))]) for row in csv]
else:
csv = [','.join(row) for row in csv]
if os.name == 'posix':
return '\r\n'.join(csv)
else:
return '\n'.join(csv)
def html_link_cells(self):
"""
        This will return a new table with cells linked with their columns
        that have <Link> in the name
:return:
"""
new_table = self.copy()
for row in new_table:
for c in new_table.columns:
link = '%s <Link>' % c
if row.get(link, None):
row[c] = '<a href="%s">%s</a>' % (row[link], row[c])
new_table.columns = [c for c in self.columns if '<Link>' not in c]
return new_table
def html_row_respan(self, row_span):
row_span = [col for col in (row_span or []) if col in self.columns]
if not row_span or len(self) < 2:
return
i = 0
while i < len(self):
for j, row in enumerate(self[i + 1:], i + 1):
differences = [c for c in row_span if self[i][c] != row[c]]
if differences:
break
for c in row_span:
self[i][c] = HTMLRowRespan(row[c], j - i + 1)
row[c] = HTMLRowRespan(row[c])
i = j if i != j else i + 1
def obj_to_html(self, tab='', border=1, cell_padding=5, cell_spacing=1,
border_color='black', align='center', row_span=None):
"""
This will return a str of an html table.
:param tab: str to insert before each line e.g. ' '
:param border: int of the thickness of the table lines
:param cell_padding: int of the padding for the cells
        :param cell_spacing: int of the spacing for the cells
:param border_color: str of the color for the border
:param align: str for cell alignment, center, left, right
:param row_span: list of rows to span
:return: str of html code
"""
html_table = self.html_link_cells()
html_table.html_row_respan(row_span)
data = [self.html_row(html_table.columns, tab + ' ', '#bcbcbc',
align=align)]
for i, row in enumerate(html_table):
color = '#dfe7f2' if i % 2 else None
row = [row[c] for c in html_table.columns]
data.append(self.html_row(row, tab + ' ', color, align=align))
ret = '''
<table border="%s" cellpadding="%s" cellspacing="%s"
bordercolor="%s" >
%s
</table>'''.strip().replace('\n ', '\n')
data = ('\n%s ' % tab).join(data)
return (ret % (border, cell_padding, cell_spacing, border_color, data)
).replace('\n', '\n%s' % tab)
@staticmethod
def html_cell(cell):
head = '<th'
if isinstance(cell, HTMLRowRespan):
if cell.count == 0:
return ''
head = '<th rowspan="%s"' % cell.count
if cell is None:
return '%s/>' % head
if '\n' not in safe_str(cell):
return '%s>%s</th>' % (head, cell)
return '%s align="left">%s</th>' % (
head, safe_str(cell).replace('\n', '<br>'))
def html_row(self, row, tab=' ', background_color=None, header='',
align='center'):
data = [self.html_cell(cell) for cell in row]
if background_color is not None:
header = 'bgcolor="%s"' % background_color + header
header += 'align="%s"' % align
return '<tr %s>\n%s %s\n%s</tr>' % (
header, tab, ('\n%s ' % tab).join(data), tab)
def excel_cell(self, cell, quote_everything=False):
"""
This will return a text that excel interprets correctly when
importing csv
:param cell: obj to store in the cell
:param quote_everything: bool to quote even if not necessary
:return:
"""
if cell is None:
return ''
if cell is True:
return 'TRUE'
if cell is False:
return 'FALSE'
if isinstance(cell, unicode):
cell = cell.replace(u'\u2019', "'").replace(u'\u2018', "'")
cell = cell.replace(u'\u201c', '"').replace(u'\u201d', '"')
if isinstance(cell, unicode):
cell = cell.encode('ascii', errors="replace")
if sys.version_info[0] == 3 and isinstance(cell, bytes):
cell = cell.decode('utf-8')
if quote_everything:
return '"' + safe_str(cell) + '"'
if isinstance(cell, str) and cell.replace('.', '').isdigit():
return '"' + cell + '"'
ret = safe_str(cell).replace('\r', '\\r').replace('\n', '\r')
if ret.startswith(' ') or ret.endswith(' '):
return '"' + ret.replace('"', '""') + '"'
for special_char in ['\r', '\t', '"', ',']:
if special_char in ret:
return '"' + ret.replace('"', '""') + '"'
return ret
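    # Examples of the quoting rules above (illustrative):
    #   excel_cell(None)    -> ''
    #   excel_cell(True)    -> 'TRUE'
    #   excel_cell('12.5')  -> '"12.5"'   (digit-like strings are quoted)
    #   excel_cell('a,b')   -> '"a,b"'    (commas force quoting)
    #   excel_cell(' pad ') -> '" pad "'  (leading/trailing spaces force quoting)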
@classmethod
def csv_to_obj(cls, file_path='', text='', columns=None,
remove_empty_rows=True, key_on=None):
"""
This will convert a csv file or csv text into a seaborn table and return it
:param file_path: str of the path to the file
:param text: str of the csv text
:param columns: list of str of columns to use
:param remove_empty_rows: bool if True will remove empty rows which can happen in non-trimmed file
:param key_on: list of str of columns to key on
:return: SeabornTable
"""
if file_path and os.path.exists(file_path):
text = open(file_path, 'rb').read()
if sys.version_info[0] == 3:
text = text.decode('utf-8')
data = []
text = text.replace('\xdf', 'B')
text = text.replace('\xef\xbb\xbf','')
if text.find('\r\n') == -1:
lines = text.replace('""', '\xdf').split('\n')
else:
lines = text.replace('""', '\xdf').split('\r\n')
for i in xrange(len(lines)):
lines[i] = lines[i].replace('\r', '\n').replace('\\r', '\r').split(',')
l = 0
while l < len(lines):
cells = lines[l]
l += 1
i = 0
row = []
while i < len(cells):
cell = cells[i] # for some reason this is slow in pycharm debug
i += 1
while cell.count('"') % 2:
if i >= len(cells): # this shouldn't, but maybe excel is doing it
cells += lines[l]
cell += "\n"+cells[i] # add the line break back in
l += 1
else:
cell += ',' + cells[i]
i += 1
cell = cell.strip()
if cell and cell[0] == '"' and cell[-1] == '"':
cell = cell[1:-1].replace('\xdf', '"')
elif cell.replace('.', '').isdigit():
cell = eval(cell)
row.append(cell)
if not remove_empty_rows or True in [bool(r) for r in row]:
data.append(row)
ret = cls(data, key_on=key_on)
ret.columns = columns or ret.columns
return ret
@classmethod
def mark_down_to_dict_of_obj(cls, file_path='', text='', columns=None, key_on=None, ignore_code_blocks=True):
"""
This will read multiple tables separated by a #### Header and return it as a dictionary of headers
:param file_path: str of the path to the file
:param text: str of the mark down text
:param columns: list of str of columns to use
:param key_on: list of str of columns to key on
:param ignore_code_blocks: bool if true will filter out any lines between ```
:return: OrderedDict of {<header>: SeabornTable}
"""
| |
ofname_id, doc_current,
researches, comments, for_rmis=None, rmis_data=None, vich_code=''):
        # import needed to get the price list and the price for each service
from forms import forms_func
if rmis_data is None:
rmis_data = {}
        researches_grouped_by_lab = []  # List of selected researches grouped by laboratory
i = 0
result = {"r": False, "list_id": []}
ofname_id = ofname_id or -1
ofname = None
if not doc_current.is_member(["Лечащий врач", "Оператор лечащего врача"]):
result["message"] = "Недостаточно прав для создания направлений"
return result
if not Clients.Card.objects.filter(pk=client_id).exists():
result["message"] = "Карта в базе не зарегистрирована, попробуйте выполнить поиск заново"
return result
        if client_id and researches:  # if client_id was received and researches were selected
if ofname_id > -1:
ofname = umodels.DoctorProfile.objects.get(pk=ofname_id)
no_attach = False
conflict_list = []
conflict_keys = []
            for v in researches:  # normalize researches
researches_grouped_by_lab.append({v: researches[v]})
for vv in researches[v]:
research_tmp = directory.Researches.objects.get(pk=vv)
if research_tmp.no_attach and research_tmp.no_attach > 0:
if research_tmp.no_attach not in conflict_keys:
conflict_keys.append(research_tmp.no_attach)
if not no_attach:
conflict_list = [research_tmp.title]
else:
no_attach = True
conflict_list.append(research_tmp.title)
i += 1
res = []
            for v in researches_grouped_by_lab:  # loop converting the list into a dict
for key in v.keys():
res += v[key]
# {5:[0,2,5,7],6:[8]}
if not no_attach:
                directions_for_researches = {}  # Dict for temporarily storing the created directions.
                # Researches are attached to a direction by their group
finsource = IstochnikiFinansirovaniya.objects.filter(pk=finsource).first()
                # get the price list
price_obj = IstochnikiFinansirovaniya.get_price_modifier(finsource)
for v in res:
research = directory.Researches.objects.get(pk=v)
research_coast = None
dir_group = -1
if research.direction:
dir_group = research.direction.pk
if dir_group > -1 and dir_group not in directions_for_researches.keys():
directions_for_researches[dir_group] = Napravleniya.gen_napravleniye(client_id,
doc_current if not for_rmis else None,
finsource,
diagnos,
history_num,
doc_current,
ofname_id,
ofname,
for_rmis=for_rmis,
rmis_data=rmis_data)
result["list_id"].append(directions_for_researches[dir_group].pk)
if dir_group == -1:
dir_group = "id" + str(research.pk)
directions_for_researches[dir_group] = Napravleniya.gen_napravleniye(client_id,
doc_current if not for_rmis else None,
finsource,
diagnos,
history_num,
doc_current,
ofname_id,
ofname,
for_rmis=for_rmis,
rmis_data=rmis_data)
result["list_id"].append(directions_for_researches[dir_group].pk)
                    # get the current price for this service from the price list
research_coast = contracts.PriceCoast.get_coast_from_price(research.pk, price_obj)
research_discount = 10*-1
research_howmany = 1
issledovaniye = Issledovaniya(napravleniye=directions_for_researches[dir_group],
research=research,coast=research_coast,discount=research_discount,
how_many=research_howmany,
deferred=False)
issledovaniye.comment = (comments.get(str(research.pk), "") or "")[:10]
issledovaniye.save()
FrequencyOfUseResearches.inc(research, doc_current)
for k, v in directions_for_researches.items():
if Issledovaniya.objects.filter(napravleniye=v, research__need_vich_code=True).exists():
v.vich_code = vich_code
v.save()
result["r"] = True
slog.Log(key=json.dumps(result["list_id"]), user=doc_current, type=21,
body=json.dumps({"researches": researches,
"client_num": Clients.Card.objects.get(pk=client_id).number,
"client_id": client_id, "diagnos": diagnos,
"finsource": "" if not finsource else finsource.title + " " + finsource.base.title,
"history_num": history_num, "ofname": str(ofname),
"for_rmis": for_rmis,
"rmis_data": rmis_data,
"comments": comments})).save()
else:
result["r"] = False
result["message"] = "Следующие исследования не могут быть назначены вместе: " + ", ".join(conflict_list)
return result
def has_confirm(self):
"""
        Whether one or more researches in this direction have been confirmed
        :return: True if at least one research is confirmed
"""
return any([x.doc_confirmation is not None for x in Issledovaniya.objects.filter(napravleniye=self)])
def is_all_confirm(self):
"""
        Whether all researches in this direction have been confirmed
        :return: True if everything is confirmed
"""
return all([x.doc_confirmation is not None for x in Issledovaniya.objects.filter(napravleniye=self)])
def is_has_deff(self):
"""
        Whether there are deferred researches
        :return: True if not all researches are confirmed and at least one is deferred
"""
return not self.is_all_confirm() and any([x.deferred for x in Issledovaniya.objects.filter(napravleniye=self)])
def department(self):
if Issledovaniya.objects.filter(napravleniye=self).exists():
return Issledovaniya.objects.filter(napravleniye=self)[0].research.podrazdeleniye
return None
def rmis_direction_type(self) -> str:
dep = self.department()
if dep:
return dep.rmis_direction_type
from rmis_integration.client import Settings
return Settings.get("direction_type_title", default="Направление в лабораторию")
def rmis_department_title(self) -> str:
dep = self.department()
if dep:
return dep.rmis_department_title
from rmis_integration.client import Settings
return Settings.get("depname")
def rmis_referral_title(self) -> str:
return self.doc.podrazdeleniye.rmis_department_title
class Meta:
verbose_name = 'Направление'
verbose_name_plural = 'Направления'
class Issledovaniya(models.Model):
"""
    Research orders belonging to a direction (referral)
"""
napravleniye = models.ForeignKey(Napravleniya, help_text='Направление', db_index=True, on_delete=models.CASCADE)
research = models.ForeignKey(directory.Researches, null=True, blank=True, help_text='Вид исследования из справочника', db_index=True, on_delete=models.CASCADE)
tubes = models.ManyToManyField(TubesRegistration, help_text='Ёмкости, необходимые для исследования', db_index=True)
doc_save = models.ForeignKey(DoctorProfile, null=True, blank=True, related_name="doc_save", db_index=True, help_text='Профиль пользователя, сохранившего результат', on_delete=models.SET_NULL)
time_save = models.DateTimeField(null=True, blank=True, db_index=True, help_text='Время сохранения результата')
doc_confirmation = models.ForeignKey(DoctorProfile, null=True, blank=True, related_name="doc_confirmation", db_index=True, help_text='Профиль пользователя, подтвердившего результат', on_delete=models.SET_NULL)
time_confirmation = models.DateTimeField(null=True, blank=True, db_index=True, help_text='Время подтверждения результата')
deferred = models.BooleanField(default=False, blank=True, help_text='Флаг, отложено ли иследование', db_index=True)
comment = models.CharField(max_length=10, default="", blank=True, help_text='Комментарий (отображается на ёмкости)')
lab_comment = models.TextField(default="", null=True, blank=True, help_text='Комментарий, оставленный лабораторией')
api_app = models.ForeignKey(Application, null=True, blank=True, default=None, help_text='Приложение API, через которое результаты были сохранены', on_delete=models.SET_NULL)
coast = models.DecimalField(max_digits=10,null=True, blank=True, default=None, decimal_places=2)
discount = models.SmallIntegerField(default=0, help_text='Скидка назначена оператором')
how_many = models.PositiveSmallIntegerField(default=1,help_text='Кол-во услуг назначено оператором')
def __str__(self):
return "%d %s" % (self.napravleniye.pk, self.research.title)
def is_get_material(self):
"""
        Whether all material for the research has been collected
        :return: True if all material has been collected
"""
return self.tubes.filter().exists() and all([x.doc_get is not None for x in self.tubes.filter()])
def get_visit_date(self):
if not self.time_confirmation:
return ""
if not self.napravleniye.visit_date or not self.napravleniye.visit_who_mark:
self.napravleniye.visit_date = timezone.now()
self.napravleniye.visit_who_mark = self.doc_confirmation
self.napravleniye.save()
return strdate(self.napravleniye.visit_date)
def is_receive_material(self):
"""
        Whether the laboratory has received the material
        :return: True if all material has been received
"""
return self.is_get_material() and all([x.doc_recive is not None for x in self.tubes.filter()])
def get_analyzer(self):
return "" if not self.api_app else self.api_app.name
class Meta:
verbose_name = 'Назначение на исследование'
verbose_name_plural = 'Назначения на исследования'
class ParaclinicResult(models.Model):
issledovaniye = models.ForeignKey(Issledovaniya, db_index=True,
help_text='Направление на исследование, для которого сохранен результат',
on_delete=models.CASCADE)
field = models.ForeignKey(directory.ParaclinicInputField, db_index=True,
help_text='Поле результата',
on_delete=models.CASCADE)
value = models.TextField()
class RmisServices(models.Model):
napravleniye = models.ForeignKey(Napravleniya, help_text='Направление', db_index=True, on_delete=models.CASCADE)
code = models.TextField(help_text='Код выгруженной услуги', db_index=True)
rmis_id = models.CharField(max_length=15, default="", blank=True, help_text='ID выгруженной услуги в РМИС')
def __str__(self):
return "%s %s" % (self.napravleniye, self.code)
class Meta:
verbose_name = 'Выгруженная в РМИС услуга для направления'
verbose_name_plural = 'Выгруженные в РМИС услуги для направлений'
class Result(models.Model):
"""
    Research result
"""
issledovaniye = models.ForeignKey(Issledovaniya, db_index=True, help_text='Направление на исследование, для которого сохранен результат', on_delete=models.CASCADE)
fraction = models.ForeignKey(directory.Fractions, help_text='Фракция из исследования', db_index=True, on_delete=models.CASCADE)
value = models.TextField(null=True, blank=True, help_text='Значение')
iteration = models.IntegerField(default=1, null=True, help_text='Итерация')
is_normal = models.CharField(max_length=255, default="", null=True, blank=True, help_text="Это норма?")
ref_m = JSONField(default=None, blank=True, null=True, help_text="Референсы М")
ref_f = JSONField(default=None, blank=True, null=True, help_text="Референсы Ж")
units = models.CharField(max_length=255, default=None, blank=True, null=True, help_text="Единицы измерения")
ref_title = models.CharField(max_length=255, default=None, blank=True, null=True, help_text="Референсы Название")
ref_about = models.TextField(default=None, blank=True, null=True, help_text="Референсы Описание")
def __str__(self):
return "%s | %s | %s" % (self.pk, self.fraction, self.ref_m is not None and self.ref_f is not None)
def get_units(self, needsave=True):
if not self.units and self.fraction.units and self.fraction.units != "":
self.units = self.fraction.units
if needsave:
self.save()
return self.units or ""
def get_ref(self, as_str=False, full=False, fromsave=False, re_save=False, needsave=True):
if (not self.ref_title and not fromsave) or re_save:
self.ref_title = "Default" if self.fraction.default_ref is None else self.fraction.default_ref.title
self.save()
if not self.ref_m or re_save:
self.ref_m = self.fraction.ref_m if self.fraction.default_ref is None else self.fraction.default_ref.ref_m
if needsave:
self.save()
if not self.ref_f or re_save:
self.ref_f = self.fraction.ref_f if self.fraction.default_ref is None else self.fraction.default_ref.ref_f
if needsave:
self.save()
if not self.ref_about or re_save:
self.ref_about = "" if self.fraction.default_ref is None else self.fraction.default_ref.about
if needsave:
self.save()
if full:
return {"title": self.ref_title, "about": self.ref_about, "m": self.ref_m, "f": self.ref_f}
ref = self.ref_f if self.issledovaniye.napravleniye.client.individual.sex.lower() != "м" else self.ref_m
if isinstance(ref, str):
ref = json.loads(ref)
if not ref:
ref = {}
if not as_str:
return ref
else:
return json.dumps(ref)
def get_is_norm(self, recalc=False):
if self.is_normal == "" or recalc:
norm = self.calc_normal()
if self.is_normal != norm:
self.save()
else:
norm = self.is_normal
return norm
def save(self, *args, **kw):
self.is_normal = self.calc_normal(True)
super(Result, self).save(*args, **kw)
def calc_normal(self, fromsave=False, only_ref=False, raw_ref=True):
import operator
from functools import reduce
trues = {True: ["полож.", "положительно", "да", "положительный", "обнаружено"],
False: ["отриц.", "отрицательно", "нет", "1/0", "отрицательный", "не обнаружено"]}
signs = {">": [">", ">", "более", "старше"], "<": ["<", "<", "до", "младше", "менее"]}
value = self.value
days, monthes, years = self.issledovaniye.napravleniye.client.individual.age(iss=self.issledovaniye, days_monthes_years=True)
ref = self.get_ref(fromsave=fromsave)
def isnum(r):
return r.replace(".", "", 1).replace(",", "", 1).isdigit()
def replace_pow(v):
v = str(v).replace(" ", "")
for j in range(1, 9):
for i in range(0, 12):
v = v.replace("%s*10<sup>%s</sup>" % (j, i), str(j * (10 ** i)))
for i in range(0, 12):
v = v.replace("10<sup>%s</sup>" % str(i), str(10 ** i))
return v
def val_normalize(v):
if v == float("inf"):
return v
v = replace_pow(v)
'''
if any([x in v for x in signs["<"]]):
pass
elif any([x in v for x in signs[">"]]):
pass'''
import re
tmp = re.findall("\d+,\d+", v)
for t in tmp:
v = v.replace(t, t.replace(",", "."))
tmp = re.findall("\d+\.\d+", v)
if len(tmp) == 0:
tmp = re.findall('\d+', v)
if len(tmp) == 0:
return False
return tmp[-1]
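        # Example of the HTML power-of-ten handling above (illustrative):
        # replace_pow("5*10<sup>6</sup>") -> "5000000" and
        # replace_pow("10<sup>3</sup>") -> "1000", so val_normalize() can
        # compare such values numerically against the reference range.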
def rigths(r):
if r == "все" or r == "":
return 0, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
###############################################
Filter Module Component Specific Work Book View
###############################################
"""
#
# rtk.hardware.gui.gtk.Filter.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 <NAME> andrew.rowland <AT> reliaqual <DOT> com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
# Import modules for localization support.
import gettext
import locale
# Modules required for the GUI.
try:
import pygtk
pygtk.require('2.0')
except ImportError:
sys.exit(1)
try:
import gtk
except ImportError:
sys.exit(1)
try:
import gtk.glade
except ImportError:
sys.exit(1)
# Import other RTK modules.
try:
import Configuration
import gui.gtk.Widgets as Widgets
except ImportError:
import rtk.Configuration as Configuration
import rtk.gui.gtk.Widgets as Widgets
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "weibullguy" Rowland'
try:
locale.setlocale(locale.LC_ALL, Configuration.LOCALE)
except locale.Error:
locale.setlocale(locale.LC_ALL, '')
_ = gettext.gettext
class Inputs(gtk.Frame):
"""
The Work Book view for displaying all the attributes for an electronic
filter. The attributes of an electronic filter Work Book view are:
:cvar list _lst_quality: list of MIL-HDBK-217FN2 quality levels.
    :cvar list _lst_specification: list of Filter specifications.
:cvar list _lst_style: list of Filter styles.
:ivar list _lst_count_labels: list of MIL-HDBK-217FN2 parts count input
labels.
:ivar list _lst_stress_labels: list of MIL-HDBK-217FN2 part stress input
labels.
:ivar list _lst_handler_id: list of gtk.Widget() signal IDs.
:ivar :py:class:`rtk.hardware.component.miscellaneous.Filter.Model` _hardware_model:
:ivar int _subcategory: the Component subcategory.
:ivar gtk.ComboBox cmbQuality: the gtk.ComboBox() to select and display the
MIL-HDBK-217FN2 quality level.
:ivar gtk.ComboBox cmbSpecification: the gtk.ComboBox() to select and
display the Filter's governing
specification.
:ivar gtk.ComboBox cmbStyle: the gtk.ComboBox() to select and display the
Filter's style.
:ivar gtk.Entry txtCommercialPiQ: the gtk.Entry() to enter and display the
user-defined quality factor.
"""
_lst_quality = ['', u"MIL-SPEC", _(u"Lower")]
_lst_specification = [u"", u"MIL-F-15733", u"MIL-F-18327"]
_lst_style = [[u"", _(u"Ceramic-Ferrite Construction"),
_(u"Discrete LC Components")],
[u"", _(u"Discrete LC Components"),
_(u"Discrete LC and Crystal Components")]]
def __init__(self, model):
"""
Method to create an input frame for the Filter data model.
:param model: the :py:class:`rtk.hardware.component.miscellaneous.Filter.Model`
whose attributes will be displayed.
"""
gtk.Frame.__init__(self)
self.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
# Define private dictionary attributes.
# Define private list attributes.
self._lst_count_labels = [_(u"Quality:")]
self._lst_stress_labels = [_(u"Quality:"),
_(u"\u03C0<sub>Q</sub> Override:"),
_(u"Specification:"), _(u"Style:")]
self._lst_handler_id = []
# Define private scalar attributes.
self._hardware_model = model
self._subcategory = model.subcategory
# Define public dictionary attributes.
# Define public list attributes.
# Define public scalar attributes.
self.cmbQuality = Widgets.make_combo(simple=True)
self.cmbSpecification = Widgets.make_combo(simple=True)
self.cmbStyle = Widgets.make_combo(simple=True)
self.txtCommercialPiQ = Widgets.make_entry(width=100)
# Create the tooltips for all the input widgets.
        self.cmbQuality.set_tooltip_text(_(u"Select and display the quality "
                                           u"level for the selected filter."))
self.cmbSpecification.set_tooltip_text(_(u"Select and display the "
u"governing specification "
u"for the selected filter."))
self.cmbStyle.set_tooltip_text(_(u"Select and display the "
u"construction style for the "
u"selected filter."))
self.txtCommercialPiQ.set_tooltip_text(_(u"User-defined quality "
u"factor for the selected "
u"filter."))
# Load the gtk.ComboBox().
for i in range(len(self._lst_quality)):
self.cmbQuality.insert_text(i, self._lst_quality[i])
for i in range(len(self._lst_specification)):
self.cmbSpecification.insert_text(i, self._lst_specification[i])
# Connect signals to callback functions.
self._lst_handler_id.append(
self.cmbQuality.connect('changed', self._on_combo_changed, 0))
self._lst_handler_id.append(
self.txtCommercialPiQ.connect('focus-out-event',
self._on_focus_out, 1))
self._lst_handler_id.append(
self.cmbSpecification.connect('changed',
self._on_combo_changed, 2))
self._lst_handler_id.append(
self.cmbStyle.connect('changed', self._on_combo_changed, 3))
def create_217_count_inputs(self, x_pos=5):
"""
Method to create the MIL-HDBK-217FN2 parts count input gtk.Widgets()
for Filters.
:keyword int x_pos: the x position of the display widgets.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
_label = gtk.Label()
_label.set_markup("<span weight='bold'>" +
_(u"MIL-HDBK-217FN2 Parts Count Inputs") +
"</span>")
_label.set_justify(gtk.JUSTIFY_LEFT)
_label.set_alignment(xalign=0.5, yalign=0.5)
_label.show_all()
self.set_label_widget(_label)
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
self.add(_scrollwindow)
# Create and place all the labels for the inputs.
(_x_pos,
_y_pos) = Widgets.make_labels(self._lst_count_labels, _fixed, 5, 5)
_x_pos = max(x_pos, _x_pos) + 50
# Place all the input widgets.
if self.cmbQuality.get_parent() is not None:
self.cmbQuality.reparent(_fixed)
_fixed.put(self.cmbQuality, _x_pos, _y_pos[0])
_fixed.show_all()
return x_pos
def create_217_stress_inputs(self, x_pos=5):
"""
Method to create the MIL-HDBK-217FN2 part stress input gtk.Widgets()
for Filters.
:keyword int x_pos: the x position of the display widgets.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
_label = gtk.Label()
_label.set_markup("<span weight='bold'>" +
_(u"MIL-HDBK-217FN2 Part Stress Inputs") +
"</span>")
_label.set_justify(gtk.JUSTIFY_LEFT)
_label.set_alignment(xalign=0.5, yalign=0.5)
_label.show_all()
self.set_label_widget(_label)
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
self.add(_scrollwindow)
# Create and place all the labels for the inputs.
(_x_pos,
_y_pos) = Widgets.make_labels(self._lst_stress_labels, _fixed, 5, 5)
_x_pos = max(x_pos, _x_pos) + 50
# Place all the input widgets.
if self.cmbQuality.get_parent() is not None:
self.cmbQuality.reparent(_fixed)
_fixed.put(self.cmbQuality, _x_pos, _y_pos[0])
_fixed.put(self.txtCommercialPiQ, _x_pos, _y_pos[1])
_fixed.put(self.cmbSpecification, _x_pos, _y_pos[2])
_fixed.put(self.cmbStyle, _x_pos, _y_pos[3])
_fixed.show_all()
return _x_pos
def load_217_count_inputs(self, model):
"""
Method to load the Filter class MIL-HDBK-217FN2 parts count input
gtk.Widgets().
:param model: the :py:class:`rtk.hardware.component.miscellaneous.Filter.Model`
to load the attributes from.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
self.cmbQuality.set_active(model.quality)
return False
def load_217_stress_inputs(self, model):
"""
Method to load the Filter class MIL-HDBK-217FN2 part stress input
gtk.Widgets().
:param model: the :py:class:`rtk.hardware.component.miscellaneous.Filter.Model`
to load the attributes from.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
fmt = '{0:0.' + str(Configuration.PLACES) + 'G}'
self.cmbQuality.set_active(model.quality)
self.cmbSpecification.set_active(model.specification)
self.cmbStyle.set_active(model.style)
self.txtCommercialPiQ.set_text(str(fmt.format(model.q_override)))
return False
def _on_combo_changed(self, combo, index):
"""
Method to respond to gtk.ComboBox() 'changed' signals and call the
        correct function or method, passing any parameters as needed.
:param gtk.ComboBox combo: the gtk.ComboBox() that called this method.
        :param int index: the index in the handler ID list of the callback
signal associated with the gtk.ComboBox() that
called this method.
        :return: False if successful or True if an error is encountered.
:rtype: bool
"""
combo.handler_block(self._lst_handler_id[index])
if index == 0:
self._hardware_model.quality = combo.get_active()
elif index == 2:
self._hardware_model.specification = combo.get_active()
self._load_styles(self._hardware_model.specification - 1)
elif index == 3:
self._hardware_model.style = combo.get_active()
combo.handler_unblock(self._lst_handler_id[index])
return False
def _on_focus_out(self, entry, __event, index):
"""
Method to respond to gtk.Entry() 'focus_out' signals and call the
correct function or method, passing any parameters as needed.
:param gtk.Entry entry: the gtk.Entry() that called this method.
:param gtk.gdk.Event __event: the gtk.gdk.Event() that called this
method.
:param int index: the index in the handler ID list of the callback
signal associated with the gtk.Entry() that
called this method.
        :return: False if successful or True if an error is encountered.
:rtype: bool
"""
entry.handler_block(self._lst_handler_id[index])
if index == 1:
self._hardware_model.q_override = float(entry.get_text())
entry.handler_unblock(self._lst_handler_id[index])
return False
def _load_styles(self, specification):
"""
Method to load the construction style gtk.ComboBox() whenever a new
specification is selected.
:param int specification: the selected specification index.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
# Remove existing entries.
_model = self.cmbStyle.get_model()
_model.clear()
# Load the new entries.
_n_styles = len(self._lst_style[specification])
for i in range(_n_styles):
self.cmbStyle.insert_text(
i, self._lst_style[specification][i])
return False
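    # Illustrative flow: picking u"MIL-F-15733" in cmbSpecification fires _on_combo_changed with
    # index 2, which stores specification = 1 and calls _load_styles(0), so cmbStyle is filled
    # with the first list in _lst_style.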
class Results(gtk.Frame):
"""
The Work Book view for displaying all the output attributes for a
Filter. The output attributes of a Filter Work Book view are:
"""
def __init__(self, model):
"""
Method to initialize an instance of the Filter assessment | |
<reponame>figlerg/network_tracing
import hashlib
import os
import pickle
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import FormatStrFormatter
import matplotlib as mpl
from globals import *
from net import Net
from tqdm import tqdm
import cycler
import random
random.seed(12345)
# Direct input
plt.rcParams['text.latex.preamble'] = r"\usepackage{bm} \usepackage{amsmath}"
# plt.rcParams['text.latex.preamble']=[r"\usepackage{lmodern}"]
# Options
params = {'text.usetex': True,
'font.size': 10,
'font.family': 'serif'
# 'font.family' : 'lmodern',
}
plt.rcParams.update(params)
columwidth = 251.8 / 72.27 # 251.80688[pt] / 72.27[pt/inch]
def estimateQuotientCI(ax, xvalues, mean1, sd1, mean2, sd2, color, mccount, p=95):
iters = 2000
lowers = list()
uppers = list()
percs = [(100 - p) / 2, 100 - (100 - p) / 2]
"""
Monte Carlo mean 1/N*sum(X_i) implies:
V(1/N*sum(X_i))=1/(N^2)*sum(V(X_i))=1/(N^2)*N*V(X)=V(X)/N
=> Variance of monte carlo mean is 1/N times variance of single model result
=> SD is 1/sqrt(N) times SD of model result
"""
sd11 = sd1 / (mccount ** 0.5)
sd21 = sd2 / (mccount ** 0.5)
for m1, s1, m2, s2 in zip(mean1, sd11, mean2, sd21):
quotients = list()
for i in range(iters):
"""
since (sum(X_i)-mu)/(sqrt(N)*sigma) converges towards Normal(0,1) we may
assume 1/N*sum(X_i) approx ~ Normal(mu,sigma/sqrt(N))
"""
denom = random.normalvariate(m2, s2)
            if denom <= 0:  # truncate normal dist - non-positive denominators don't make sense
continue
nom = random.normalvariate(m1, s1)
            if nom < 0:  # truncate normal dist - negative values don't make sense
continue
quotients.append(nom / denom)
ps = np.percentile(quotients, percs)
lowers.append(ps[0])
uppers.append(ps[1])
ax.fill_between(xvalues, lowers, uppers, color=color, alpha=0.2, zorder=-1)
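# Illustrative call of estimateQuotientCI (hypothetical arrays): draw a 95% CI band for the
# ratio mean1/mean2 of two Monte-Carlo estimates that were each averaged over `mccount` runs, e.g.
#   fig, ax = plt.subplots()
#   estimateQuotientCI(ax, xvalues, mean1, sd1, mean2, sd2, color='C0', mccount=100, p=95)
# where mean1/sd1 and mean2/sd2 are per-x means and standard deviations of the two quantities.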
# pickling disabled for now, uncomment plot lines for that
def simple_experiment_old(n, p, p_i, mc_iterations, max_t, seed=0, mode=None, force_recompute=False, path=None,
clustering: float = None, dispersion=None):
# this creates the net, runs monte carlo on it and saves the resulting timeseries plot, as well as pickles for net and counts
assert not (dispersion and clustering), "Cannot set a dispersion target and " \
"a clustering target at the same time"
if dispersion:
chosen_epsilon = epsilon_disp
else:
chosen_epsilon = epsilon_clustering
if path:
dirname = path
else:
dirname_parent = os.path.dirname(__file__)
dirname = os.path.join(dirname_parent, 'Experiments')
# the cache is now tagged with a hash from all important parameters instead of the above.
# Any change to the model parameters will certainly trigger a recompute now
id_params = (
n, p, p_i, mc_iterations, max_t, seed, mode, clustering, dispersion, t_i, t_c, t_r, t_d, t_t, p_q, p_t,
quarantine_time, resolution, chosen_epsilon)
# normal hashes are salted between runs -> use something that is persistent
tag = str(hashlib.md5(str(id_params).encode('utf8')).hexdigest())
# disables loading pickled results
if force_recompute:
# if false, it looks at saved experiments and reuses those
net = Net(n=n, p=p, p_i=p_i, max_t=max_t, seed=seed, clustering_target=clustering, dispersion_target=dispersion)
counts, sd, achieved_clustering, achieved_disp = net.monte_carlo(mc_iterations, mode=mode)
with open(os.path.join(dirname, tag + '_net.p'), 'wb') as f:
pickle.dump((net, achieved_clustering, achieved_disp), f)
with open(os.path.join(dirname, tag + '_counts.p'), 'wb') as f:
pickle.dump((counts, sd), f)
else:
try:
with open(os.path.join(dirname, tag + "_counts.p"), 'rb') as f:
counts, sd = pickle.load(f)
with open(os.path.join(dirname, tag + "_net.p"), 'rb') as f:
net, achieved_clustering, achieved_disp = pickle.load(f)
print('Experiment results have been loaded from history.')
except FileNotFoundError:
net = Net(n=n, p=p, p_i=p_i, max_t=max_t, seed=seed, clustering_target=clustering,
dispersion_target=dispersion)
counts, sd, achieved_clustering, achieved_disp = net.monte_carlo(mc_iterations, mode=mode)
with open(os.path.join(dirname, tag + '_net.p'), 'wb') as f:
pickle.dump((net, achieved_clustering, achieved_disp), f)
with open(os.path.join(dirname, tag + '_counts.p'), 'wb') as f:
pickle.dump((counts, sd), f)
exposed = counts[EXP_STATE, :]
infected = counts[INF_STATE, :]
ep_curve = exposed + infected
# compute when the peak happens and what the ratio of infected is then
t_peak = np.argmax(ep_curve, axis=0)
peak_height = ep_curve[t_peak] / n
# compute the ratio of all exposed people at end of sim to the number of indiv.
# (also check heuristically whether an equilibrium has been reached
recovered = counts[REC_STATE, :]
virus_contacts = ep_curve + recovered
sensitivity = max(1, n / 100) # increasing divisor makes this more sensitive
equilib_flag = abs(
virus_contacts[-1] - virus_contacts[-2]) < sensitivity # just a heuristic, see whether roc is low
period_prevalence = virus_contacts[-1] / n
return net, counts, sd, t_peak, peak_height, equilib_flag, period_prevalence, achieved_clustering, achieved_disp
from do_experiment_parallel import \
simple_experiment # this is the new, parallel version of the above function. By Martin!
def vary_p(res, n, p_i, mc_iterations, max_t, interval=(0, 1), seed=0, mode=None, force_recompute=False, path=None):
# here I want to systematically check what varying the edge probability does. Should return something like a 1d heatmap?
# return value should use one of the values t_peak, peak_height, equilib_flag, period_prevalence
peak_times = np.ndarray(res)
mean_peak_heights = np.ndarray(res)
mean_period_prevalences = np.ndarray(res)
sd_peak_heights = np.ndarray(res)
sd_period_prevalences = np.ndarray(res)
ps = np.linspace(interval[0], interval[1], endpoint=True, num=res)
for i, p in enumerate(ps):
net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, clustering, dispersion = \
simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i, mode=mode,
force_recompute=force_recompute, path=path)
peak_times[i] = t_peak
mean_peak_heights[i] = mean_peak
sd_peak_heights[i] = sd_peak
mean_period_prevalences[i] = mean_prevalence
sd_period_prevalences[i] = sd_prevalence
fig, axes = plt.subplots(3, 1, sharex=True, figsize=(16 * 1.5, 9 * 1.5))
ax1, ax2, ax3 = axes
if mode:
ax1.set_title(mode)
else:
ax1.set_title('vanilla')
ax1.plot(ps, peak_times)
# ax1.set_xlabel('p')
ax1.set_ylabel('Peak time')
ax2.plot(ps, mean_peak_heights)
# ax2.set_xlabel('p')
ax2.set_ylabel('Peak prevalence')
ax3.plot(ps, mean_period_prevalences)
ax3.set_ylabel('Fraction of affected')
ax3.set_xlabel('p')
# labels = [interval[0],] + list(['' for i in range(len(ps)-2)]) + [interval[1],]
ax3.set_xticks(ps[1:-2], minor=True)
ax3.set_xticks([interval[0], interval[1]])
plt.tick_params(
axis='x', # changes apply to the x-axis
which='minor', # both major and minor ticks are affected
# bottom=False, # ticks along the bottom edge are off
# top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
# plt.xticks([interval[0],interval[1]])
if mode:
fig.savefig(os.path.join(path, 'pvaried_n{}_p{}_{}'.format(
n, str(interval[0]) + 'to' + str(interval[1]), mode) + '.png'))
else:
fig.savefig(os.path.join(path, 'pvaried_n{}_p{}'.format(
n, str(interval[0]) + 'to' + str(interval[1])) + '.png'))
def vary_p_plot_cache(res, n, p_i, mc_iterations, max_t, interval=(0, 1), seed=0, force_recompute=False, path=None):
# utility function that loads all the pickles (or runs them first) and plots the three scenarios
# is a modified copy of vary_p !
peak_times = np.ndarray(res)
peak_heights = np.ndarray(res)
period_prevalences = np.ndarray(res)
peak_times_q = np.ndarray(res)
peak_heights_q = np.ndarray(res)
period_prevalences_q = np.ndarray(res)
peak_times_t = np.ndarray(res)
peak_heights_t = np.ndarray(res)
period_prevalences_t = np.ndarray(res)
peak_heights_sd = np.ndarray(res)
peak_heights_q_sd = np.ndarray(res)
peak_heights_t_sd = np.ndarray(res)
period_prevalences_sd = np.ndarray(res)
period_prevalences_q_sd = np.ndarray(res)
period_prevalences_t_sd = np.ndarray(res)
ps = np.linspace(interval[0], interval[1], endpoint=True, num=res)
# all 3 modes
for i, p in tqdm(enumerate(ps), total=res, desc='Vanilla'):
net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i + res, mode=None,
force_recompute=force_recompute, path=path)
peak_times[i] = t_peak
peak_heights[i] = mean_peak
peak_heights_sd[i] = sd_peak
period_prevalences[i] = mean_prevalence
period_prevalences_sd[i] = sd_prevalence
for i, p in tqdm(enumerate(ps), total=res, desc='Quarantine'):
net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i + 2 * res, mode='quarantine',
force_recompute=force_recompute,
path=path)
peak_times_q[i] = t_peak
peak_heights_q[i] = mean_peak
peak_heights_q_sd[i] = sd_peak
period_prevalences_q[i] = mean_prevalence
period_prevalences_q_sd[i] = sd_prevalence
for i, p in tqdm(enumerate(ps), total=res, desc='Tracing'):
net, mean_counts, sd_counts, t_peak, mean_peak, sd_peak, mean_prevalence, sd_prevalence, equilib_flag, achieved_clustering, achieved_disp = \
simple_experiment(n, p, p_i, mc_iterations, max_t, seed=seed + i + 3 * res, mode='tracing',
force_recompute=force_recompute,
path=path)
peak_times_t[i] = t_peak
peak_heights_t[i] = mean_peak
peak_heights_t_sd[i] = sd_peak
period_prevalences_t[i] = mean_prevalence
period_prevalences_t_sd[i] = sd_prevalence
fig, axes = plt.subplots(3, 1, sharex=True, figsize=(14, 14 / 16 * 9))
ax1, ax2, ax3 = axes
ax1.plot(ps, peak_times, ps, peak_times_q, ps, peak_times_t)
ax1.set_ylabel('Peak time')
ax2.plot(ps, peak_heights, ps, peak_heights_q, ps, peak_heights_t)
ax2.set_ylabel('Peak prevalence')
ax3.plot(ps, period_prevalences, ps, period_prevalences_q, ps, period_prevalences_t)
ax3.set_ylabel('Fraction of affected')
ax3.set_xlabel('p')
ax3.set_xticks(ps[1:-2], minor=True)
ax3.set_xticks([interval[0], interval[1]])
plt.legend(['Vanilla', 'Quarantine', 'Tracing'])
plt.tick_params(
axis='x',
which='minor',
# bottom=False, # ticks along the bottom edge are off
# top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
# plt.xticks([interval[0],interval[1]])
parent = os.path.dirname(path)
fig.savefig(os.path.join(parent, 'Pics', 'pvaried_n{}_mc{}_{}'.format(n, mc_iterations, 'comp') + '.png'),
bbox_inches='tight')
# this feels pretty uninteresting:
def vary_p_i(res, n, p, mc_iterations, max_t, seed=0, mode=None, force_recompute=False, path=None):
    # here I want to systematically check what varying the infection probability p_i does. Should return something like a 1d heatmap?
# return value should use one of the values t_peak, peak_height, equilib_flag, period_prevalence
peak_times = np.ndarray(res)
peak_heights = np.ndarray(res)
peak_heights_sd = np.ndarray(res)
# flags = np.ndarray(res)
period_prevalences = np.ndarray(res)
period_prevalences_sd = np.ndarray(res)
p_is = np.linspace(0, 1, endpoint=True, num=res)
| |
x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
ExchangeID = property(_xmdapi.CTORATstpSecurityStatusField_ExchangeID_get, _xmdapi.CTORATstpSecurityStatusField_ExchangeID_set)
SecurityID = property(_xmdapi.CTORATstpSecurityStatusField_SecurityID_get, _xmdapi.CTORATstpSecurityStatusField_SecurityID_set)
IsSuspend = property(_xmdapi.CTORATstpSecurityStatusField_IsSuspend_get, _xmdapi.CTORATstpSecurityStatusField_IsSuspend_set)
IsBreak = property(_xmdapi.CTORATstpSecurityStatusField_IsBreak_get, _xmdapi.CTORATstpSecurityStatusField_IsBreak_set)
IsLongSuspend = property(_xmdapi.CTORATstpSecurityStatusField_IsLongSuspend_get, _xmdapi.CTORATstpSecurityStatusField_IsLongSuspend_set)
IsCircuitBreak = property(_xmdapi.CTORATstpSecurityStatusField_IsCircuitBreak_get, _xmdapi.CTORATstpSecurityStatusField_IsCircuitBreak_set)
IsSupportMarginBuy = property(_xmdapi.CTORATstpSecurityStatusField_IsSupportMarginBuy_get, _xmdapi.CTORATstpSecurityStatusField_IsSupportMarginBuy_set)
IsSupportShortSell = property(_xmdapi.CTORATstpSecurityStatusField_IsSupportShortSell_get, _xmdapi.CTORATstpSecurityStatusField_IsSupportShortSell_set)
IsSupportPur = property(_xmdapi.CTORATstpSecurityStatusField_IsSupportPur_get, _xmdapi.CTORATstpSecurityStatusField_IsSupportPur_set)
IsSupportRed = property(_xmdapi.CTORATstpSecurityStatusField_IsSupportRed_get, _xmdapi.CTORATstpSecurityStatusField_IsSupportRed_set)
IsSupportSplit = property(_xmdapi.CTORATstpSecurityStatusField_IsSupportSplit_get, _xmdapi.CTORATstpSecurityStatusField_IsSupportSplit_set)
IsSupportMerge = property(_xmdapi.CTORATstpSecurityStatusField_IsSupportMerge_get, _xmdapi.CTORATstpSecurityStatusField_IsSupportMerge_set)
IsSupportPleadgeIn = property(_xmdapi.CTORATstpSecurityStatusField_IsSupportPleadgeIn_get, _xmdapi.CTORATstpSecurityStatusField_IsSupportPleadgeIn_set)
IsSupportPleadgeOut = property(_xmdapi.CTORATstpSecurityStatusField_IsSupportPleadgeOut_get, _xmdapi.CTORATstpSecurityStatusField_IsSupportPleadgeOut_set)
IsSupportRoundLotBuy = property(_xmdapi.CTORATstpSecurityStatusField_IsSupportRoundLotBuy_get, _xmdapi.CTORATstpSecurityStatusField_IsSupportRoundLotBuy_set)
IsSupportRoundLotSell = property(_xmdapi.CTORATstpSecurityStatusField_IsSupportRoundLotSell_get, _xmdapi.CTORATstpSecurityStatusField_IsSupportRoundLotSell_set)
IsSupportOddLotBuy = property(_xmdapi.CTORATstpSecurityStatusField_IsSupportOddLotBuy_get, _xmdapi.CTORATstpSecurityStatusField_IsSupportOddLotBuy_set)
IsSupportOddLotSell = property(_xmdapi.CTORATstpSecurityStatusField_IsSupportOddLotSell_get, _xmdapi.CTORATstpSecurityStatusField_IsSupportOddLotSell_set)
IsSupportExercise = property(_xmdapi.CTORATstpSecurityStatusField_IsSupportExercise_get, _xmdapi.CTORATstpSecurityStatusField_IsSupportExercise_set)
IsLimitBuy = property(_xmdapi.CTORATstpSecurityStatusField_IsLimitBuy_get, _xmdapi.CTORATstpSecurityStatusField_IsLimitBuy_set)
IsLimitSell = property(_xmdapi.CTORATstpSecurityStatusField_IsLimitSell_get, _xmdapi.CTORATstpSecurityStatusField_IsLimitSell_set)
IsLimitCover = property(_xmdapi.CTORATstpSecurityStatusField_IsLimitCover_get, _xmdapi.CTORATstpSecurityStatusField_IsLimitCover_set)
IsLimitMarketMaker = property(_xmdapi.CTORATstpSecurityStatusField_IsLimitMarketMaker_get, _xmdapi.CTORATstpSecurityStatusField_IsLimitMarketMaker_set)
def __init__(self):
_xmdapi.CTORATstpSecurityStatusField_swiginit(self, _xmdapi.new_CTORATstpSecurityStatusField())
__swig_destroy__ = _xmdapi.delete_CTORATstpSecurityStatusField
# Register CTORATstpSecurityStatusField in _xmdapi:
_xmdapi.CTORATstpSecurityStatusField_swigregister(CTORATstpSecurityStatusField)
class CTORATstpMarketStatusField(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
MarketID = property(_xmdapi.CTORATstpMarketStatusField_MarketID_get, _xmdapi.CTORATstpMarketStatusField_MarketID_set)
MarketStatus = property(_xmdapi.CTORATstpMarketStatusField_MarketStatus_get, _xmdapi.CTORATstpMarketStatusField_MarketStatus_set)
def __init__(self):
_xmdapi.CTORATstpMarketStatusField_swiginit(self, _xmdapi.new_CTORATstpMarketStatusField())
__swig_destroy__ = _xmdapi.delete_CTORATstpMarketStatusField
# Register CTORATstpMarketStatusField in _xmdapi:
_xmdapi.CTORATstpMarketStatusField_swigregister(CTORATstpMarketStatusField)
class CTORATstpImcParamsField(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
MarketID = property(_xmdapi.CTORATstpImcParamsField_MarketID_get, _xmdapi.CTORATstpImcParamsField_MarketID_set)
OpenFlag = property(_xmdapi.CTORATstpImcParamsField_OpenFlag_get, _xmdapi.CTORATstpImcParamsField_OpenFlag_set)
ThresholdAmount = property(_xmdapi.CTORATstpImcParamsField_ThresholdAmount_get, _xmdapi.CTORATstpImcParamsField_ThresholdAmount_set)
PosAmt = property(_xmdapi.CTORATstpImcParamsField_PosAmt_get, _xmdapi.CTORATstpImcParamsField_PosAmt_set)
AmountStatus = property(_xmdapi.CTORATstpImcParamsField_AmountStatus_get, _xmdapi.CTORATstpImcParamsField_AmountStatus_set)
def __init__(self):
_xmdapi.CTORATstpImcParamsField_swiginit(self, _xmdapi.new_CTORATstpImcParamsField())
__swig_destroy__ = _xmdapi.delete_CTORATstpImcParamsField
# Register CTORATstpImcParamsField in _xmdapi:
_xmdapi.CTORATstpImcParamsField_swigregister(CTORATstpImcParamsField)
class CTORATstpRapidMarketDataField(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
SecurityID = property(_xmdapi.CTORATstpRapidMarketDataField_SecurityID_get, _xmdapi.CTORATstpRapidMarketDataField_SecurityID_set)
ExchangeID = property(_xmdapi.CTORATstpRapidMarketDataField_ExchangeID_get, _xmdapi.CTORATstpRapidMarketDataField_ExchangeID_set)
DataTimeStamp = property(_xmdapi.CTORATstpRapidMarketDataField_DataTimeStamp_get, _xmdapi.CTORATstpRapidMarketDataField_DataTimeStamp_set)
PreClosePrice = property(_xmdapi.CTORATstpRapidMarketDataField_PreClosePrice_get, _xmdapi.CTORATstpRapidMarketDataField_PreClosePrice_set)
OpenPrice = property(_xmdapi.CTORATstpRapidMarketDataField_OpenPrice_get, _xmdapi.CTORATstpRapidMarketDataField_OpenPrice_set)
NumTrades = property(_xmdapi.CTORATstpRapidMarketDataField_NumTrades_get, _xmdapi.CTORATstpRapidMarketDataField_NumTrades_set)
TotalVolumeTrade = property(_xmdapi.CTORATstpRapidMarketDataField_TotalVolumeTrade_get, _xmdapi.CTORATstpRapidMarketDataField_TotalVolumeTrade_set)
TotalValueTrade = property(_xmdapi.CTORATstpRapidMarketDataField_TotalValueTrade_get, _xmdapi.CTORATstpRapidMarketDataField_TotalValueTrade_set)
HighestPrice = property(_xmdapi.CTORATstpRapidMarketDataField_HighestPrice_get, _xmdapi.CTORATstpRapidMarketDataField_HighestPrice_set)
LowestPrice = property(_xmdapi.CTORATstpRapidMarketDataField_LowestPrice_get, _xmdapi.CTORATstpRapidMarketDataField_LowestPrice_set)
LastPrice = property(_xmdapi.CTORATstpRapidMarketDataField_LastPrice_get, _xmdapi.CTORATstpRapidMarketDataField_LastPrice_set)
BidPrice1 = property(_xmdapi.CTORATstpRapidMarketDataField_BidPrice1_get, _xmdapi.CTORATstpRapidMarketDataField_BidPrice1_set)
BidVolume1 = property(_xmdapi.CTORATstpRapidMarketDataField_BidVolume1_get, _xmdapi.CTORATstpRapidMarketDataField_BidVolume1_set)
AskPrice1 = property(_xmdapi.CTORATstpRapidMarketDataField_AskPrice1_get, _xmdapi.CTORATstpRapidMarketDataField_AskPrice1_set)
AskVolume1 = property(_xmdapi.CTORATstpRapidMarketDataField_AskVolume1_get, _xmdapi.CTORATstpRapidMarketDataField_AskVolume1_set)
AskPrice2 = property(_xmdapi.CTORATstpRapidMarketDataField_AskPrice2_get, _xmdapi.CTORATstpRapidMarketDataField_AskPrice2_set)
AskVolume2 = property(_xmdapi.CTORATstpRapidMarketDataField_AskVolume2_get, _xmdapi.CTORATstpRapidMarketDataField_AskVolume2_set)
AskPrice3 = property(_xmdapi.CTORATstpRapidMarketDataField_AskPrice3_get, _xmdapi.CTORATstpRapidMarketDataField_AskPrice3_set)
AskVolume3 = property(_xmdapi.CTORATstpRapidMarketDataField_AskVolume3_get, _xmdapi.CTORATstpRapidMarketDataField_AskVolume3_set)
BidPrice2 = property(_xmdapi.CTORATstpRapidMarketDataField_BidPrice2_get, _xmdapi.CTORATstpRapidMarketDataField_BidPrice2_set)
BidVolume2 = property(_xmdapi.CTORATstpRapidMarketDataField_BidVolume2_get, _xmdapi.CTORATstpRapidMarketDataField_BidVolume2_set)
BidPrice3 = property(_xmdapi.CTORATstpRapidMarketDataField_BidPrice3_get, _xmdapi.CTORATstpRapidMarketDataField_BidPrice3_set)
BidVolume3 = property(_xmdapi.CTORATstpRapidMarketDataField_BidVolume3_get, _xmdapi.CTORATstpRapidMarketDataField_BidVolume3_set)
AskPrice4 = property(_xmdapi.CTORATstpRapidMarketDataField_AskPrice4_get, _xmdapi.CTORATstpRapidMarketDataField_AskPrice4_set)
AskVolume4 = property(_xmdapi.CTORATstpRapidMarketDataField_AskVolume4_get, _xmdapi.CTORATstpRapidMarketDataField_AskVolume4_set)
AskPrice5 = property(_xmdapi.CTORATstpRapidMarketDataField_AskPrice5_get, _xmdapi.CTORATstpRapidMarketDataField_AskPrice5_set)
AskVolume5 = property(_xmdapi.CTORATstpRapidMarketDataField_AskVolume5_get, _xmdapi.CTORATstpRapidMarketDataField_AskVolume5_set)
BidPrice4 = property(_xmdapi.CTORATstpRapidMarketDataField_BidPrice4_get, _xmdapi.CTORATstpRapidMarketDataField_BidPrice4_set)
BidVolume4 = property(_xmdapi.CTORATstpRapidMarketDataField_BidVolume4_get, _xmdapi.CTORATstpRapidMarketDataField_BidVolume4_set)
BidPrice5 = property(_xmdapi.CTORATstpRapidMarketDataField_BidPrice5_get, _xmdapi.CTORATstpRapidMarketDataField_BidPrice5_set)
BidVolume5 = property(_xmdapi.CTORATstpRapidMarketDataField_BidVolume5_get, _xmdapi.CTORATstpRapidMarketDataField_BidVolume5_set)
AskPrice6 = property(_xmdapi.CTORATstpRapidMarketDataField_AskPrice6_get, _xmdapi.CTORATstpRapidMarketDataField_AskPrice6_set)
AskVolume6 = property(_xmdapi.CTORATstpRapidMarketDataField_AskVolume6_get, _xmdapi.CTORATstpRapidMarketDataField_AskVolume6_set)
AskPrice7 = property(_xmdapi.CTORATstpRapidMarketDataField_AskPrice7_get, _xmdapi.CTORATstpRapidMarketDataField_AskPrice7_set)
AskVolume7 = property(_xmdapi.CTORATstpRapidMarketDataField_AskVolume7_get, _xmdapi.CTORATstpRapidMarketDataField_AskVolume7_set)
BidPrice6 = property(_xmdapi.CTORATstpRapidMarketDataField_BidPrice6_get, _xmdapi.CTORATstpRapidMarketDataField_BidPrice6_set)
BidVolume6 = property(_xmdapi.CTORATstpRapidMarketDataField_BidVolume6_get, _xmdapi.CTORATstpRapidMarketDataField_BidVolume6_set)
BidPrice7 = property(_xmdapi.CTORATstpRapidMarketDataField_BidPrice7_get, _xmdapi.CTORATstpRapidMarketDataField_BidPrice7_set)
BidVolume7 = property(_xmdapi.CTORATstpRapidMarketDataField_BidVolume7_get, _xmdapi.CTORATstpRapidMarketDataField_BidVolume7_set)
AskPrice8 = property(_xmdapi.CTORATstpRapidMarketDataField_AskPrice8_get, _xmdapi.CTORATstpRapidMarketDataField_AskPrice8_set)
AskVolume8 = property(_xmdapi.CTORATstpRapidMarketDataField_AskVolume8_get, _xmdapi.CTORATstpRapidMarketDataField_AskVolume8_set)
AskPrice9 = property(_xmdapi.CTORATstpRapidMarketDataField_AskPrice9_get, _xmdapi.CTORATstpRapidMarketDataField_AskPrice9_set)
AskVolume9 = property(_xmdapi.CTORATstpRapidMarketDataField_AskVolume9_get, _xmdapi.CTORATstpRapidMarketDataField_AskVolume9_set)
BidPrice8 = property(_xmdapi.CTORATstpRapidMarketDataField_BidPrice8_get, _xmdapi.CTORATstpRapidMarketDataField_BidPrice8_set)
BidVolume8 = property(_xmdapi.CTORATstpRapidMarketDataField_BidVolume8_get, _xmdapi.CTORATstpRapidMarketDataField_BidVolume8_set)
BidPrice9 = property(_xmdapi.CTORATstpRapidMarketDataField_BidPrice9_get, _xmdapi.CTORATstpRapidMarketDataField_BidPrice9_set)
BidVolume9 = property(_xmdapi.CTORATstpRapidMarketDataField_BidVolume9_get, _xmdapi.CTORATstpRapidMarketDataField_BidVolume9_set)
BidPrice10 = property(_xmdapi.CTORATstpRapidMarketDataField_BidPrice10_get, _xmdapi.CTORATstpRapidMarketDataField_BidPrice10_set)
BidVolume10 = property(_xmdapi.CTORATstpRapidMarketDataField_BidVolume10_get, _xmdapi.CTORATstpRapidMarketDataField_BidVolume10_set)
AskPrice10 = property(_xmdapi.CTORATstpRapidMarketDataField_AskPrice10_get, _xmdapi.CTORATstpRapidMarketDataField_AskPrice10_set)
AskVolume10 = property(_xmdapi.CTORATstpRapidMarketDataField_AskVolume10_get, _xmdapi.CTORATstpRapidMarketDataField_AskVolume10_set)
UpperLimitPrice = property(_xmdapi.CTORATstpRapidMarketDataField_UpperLimitPrice_get, _xmdapi.CTORATstpRapidMarketDataField_UpperLimitPrice_set)
LowerLimitPrice = property(_xmdapi.CTORATstpRapidMarketDataField_LowerLimitPrice_get, _xmdapi.CTORATstpRapidMarketDataField_LowerLimitPrice_set)
ClosePrice = property(_xmdapi.CTORATstpRapidMarketDataField_ClosePrice_get, _xmdapi.CTORATstpRapidMarketDataField_ClosePrice_set)
MDSecurityStat = property(_xmdapi.CTORATstpRapidMarketDataField_MDSecurityStat_get, _xmdapi.CTORATstpRapidMarketDataField_MDSecurityStat_set)
IOPV = property(_xmdapi.CTORATstpRapidMarketDataField_IOPV_get, _xmdapi.CTORATstpRapidMarketDataField_IOPV_set)
InnerSell = property(_xmdapi.CTORATstpRapidMarketDataField_InnerSell_get, _xmdapi.CTORATstpRapidMarketDataField_InnerSell_set)
OuterBuy = property(_xmdapi.CTORATstpRapidMarketDataField_OuterBuy_get, _xmdapi.CTORATstpRapidMarketDataField_OuterBuy_set)
BidCount1 = property(_xmdapi.CTORATstpRapidMarketDataField_BidCount1_get, _xmdapi.CTORATstpRapidMarketDataField_BidCount1_set)
AskCount1 = property(_xmdapi.CTORATstpRapidMarketDataField_AskCount1_get, _xmdapi.CTORATstpRapidMarketDataField_AskCount1_set)
AskCount2 = property(_xmdapi.CTORATstpRapidMarketDataField_AskCount2_get, _xmdapi.CTORATstpRapidMarketDataField_AskCount2_set)
AskCount3 = property(_xmdapi.CTORATstpRapidMarketDataField_AskCount3_get, _xmdapi.CTORATstpRapidMarketDataField_AskCount3_set)
BidCount2 = property(_xmdapi.CTORATstpRapidMarketDataField_BidCount2_get, _xmdapi.CTORATstpRapidMarketDataField_BidCount2_set)
BidCount3 = property(_xmdapi.CTORATstpRapidMarketDataField_BidCount3_get, _xmdapi.CTORATstpRapidMarketDataField_BidCount3_set)
AskCount4 = property(_xmdapi.CTORATstpRapidMarketDataField_AskCount4_get, _xmdapi.CTORATstpRapidMarketDataField_AskCount4_set)
AskCount5 = property(_xmdapi.CTORATstpRapidMarketDataField_AskCount5_get, _xmdapi.CTORATstpRapidMarketDataField_AskCount5_set)
BidCount4 = property(_xmdapi.CTORATstpRapidMarketDataField_BidCount4_get, _xmdapi.CTORATstpRapidMarketDataField_BidCount4_set)
BidCount5 = property(_xmdapi.CTORATstpRapidMarketDataField_BidCount5_get, _xmdapi.CTORATstpRapidMarketDataField_BidCount5_set)
AskCount6 = property(_xmdapi.CTORATstpRapidMarketDataField_AskCount6_get, _xmdapi.CTORATstpRapidMarketDataField_AskCount6_set)
AskCount7 = property(_xmdapi.CTORATstpRapidMarketDataField_AskCount7_get, _xmdapi.CTORATstpRapidMarketDataField_AskCount7_set)
BidCount6 = property(_xmdapi.CTORATstpRapidMarketDataField_BidCount6_get, _xmdapi.CTORATstpRapidMarketDataField_BidCount6_set)
BidCount7 = property(_xmdapi.CTORATstpRapidMarketDataField_BidCount7_get, _xmdapi.CTORATstpRapidMarketDataField_BidCount7_set)
AskCount8 = property(_xmdapi.CTORATstpRapidMarketDataField_AskCount8_get, _xmdapi.CTORATstpRapidMarketDataField_AskCount8_set)
AskCount9 = property(_xmdapi.CTORATstpRapidMarketDataField_AskCount9_get, _xmdapi.CTORATstpRapidMarketDataField_AskCount9_set)
BidCount8 = property(_xmdapi.CTORATstpRapidMarketDataField_BidCount8_get, _xmdapi.CTORATstpRapidMarketDataField_BidCount8_set)
BidCount9 = property(_xmdapi.CTORATstpRapidMarketDataField_BidCount9_get, _xmdapi.CTORATstpRapidMarketDataField_BidCount9_set)
BidCount10 = property(_xmdapi.CTORATstpRapidMarketDataField_BidCount10_get, _xmdapi.CTORATstpRapidMarketDataField_BidCount10_set)
AskCount10 = property(_xmdapi.CTORATstpRapidMarketDataField_AskCount10_get, _xmdapi.CTORATstpRapidMarketDataField_AskCount10_set)
def __init__(self):
_xmdapi.CTORATstpRapidMarketDataField_swiginit(self, _xmdapi.new_CTORATstpRapidMarketDataField())
__swig_destroy__ = _xmdapi.delete_CTORATstpRapidMarketDataField
# Register CTORATstpRapidMarketDataField in _xmdapi:
_xmdapi.CTORATstpRapidMarketDataField_swigregister(CTORATstpRapidMarketDataField)
class CTORATstpFundsFlowMarketDataField(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
ExchangeID = property(_xmdapi.CTORATstpFundsFlowMarketDataField_ExchangeID_get, _xmdapi.CTORATstpFundsFlowMarketDataField_ExchangeID_set)
SecurityID = property(_xmdapi.CTORATstpFundsFlowMarketDataField_SecurityID_get, _xmdapi.CTORATstpFundsFlowMarketDataField_SecurityID_set)
UpdateTime = property(_xmdapi.CTORATstpFundsFlowMarketDataField_UpdateTime_get, _xmdapi.CTORATstpFundsFlowMarketDataField_UpdateTime_set)
UpdateMillisec = property(_xmdapi.CTORATstpFundsFlowMarketDataField_UpdateMillisec_get, _xmdapi.CTORATstpFundsFlowMarketDataField_UpdateMillisec_set)
RetailBuyTurnover = property(_xmdapi.CTORATstpFundsFlowMarketDataField_RetailBuyTurnover_get, _xmdapi.CTORATstpFundsFlowMarketDataField_RetailBuyTurnover_set)
RetailBuyVolume = property(_xmdapi.CTORATstpFundsFlowMarketDataField_RetailBuyVolume_get, _xmdapi.CTORATstpFundsFlowMarketDataField_RetailBuyVolume_set)
RetailBuyAmount = property(_xmdapi.CTORATstpFundsFlowMarketDataField_RetailBuyAmount_get, _xmdapi.CTORATstpFundsFlowMarketDataField_RetailBuyAmount_set)
RetailSellTurnover = property(_xmdapi.CTORATstpFundsFlowMarketDataField_RetailSellTurnover_get, _xmdapi.CTORATstpFundsFlowMarketDataField_RetailSellTurnover_set)
RetailSellVolume = property(_xmdapi.CTORATstpFundsFlowMarketDataField_RetailSellVolume_get, _xmdapi.CTORATstpFundsFlowMarketDataField_RetailSellVolume_set)
RetailSellAmount = property(_xmdapi.CTORATstpFundsFlowMarketDataField_RetailSellAmount_get, _xmdapi.CTORATstpFundsFlowMarketDataField_RetailSellAmount_set)
MiddleBuyTurnover = property(_xmdapi.CTORATstpFundsFlowMarketDataField_MiddleBuyTurnover_get, _xmdapi.CTORATstpFundsFlowMarketDataField_MiddleBuyTurnover_set)
MiddleBuyVolume = property(_xmdapi.CTORATstpFundsFlowMarketDataField_MiddleBuyVolume_get, _xmdapi.CTORATstpFundsFlowMarketDataField_MiddleBuyVolume_set)
MiddleBuyAmount = property(_xmdapi.CTORATstpFundsFlowMarketDataField_MiddleBuyAmount_get, _xmdapi.CTORATstpFundsFlowMarketDataField_MiddleBuyAmount_set)
MiddleSellTurnover = property(_xmdapi.CTORATstpFundsFlowMarketDataField_MiddleSellTurnover_get, _xmdapi.CTORATstpFundsFlowMarketDataField_MiddleSellTurnover_set)
MiddleSellVolume = property(_xmdapi.CTORATstpFundsFlowMarketDataField_MiddleSellVolume_get, _xmdapi.CTORATstpFundsFlowMarketDataField_MiddleSellVolume_set)
MiddleSellAmount = property(_xmdapi.CTORATstpFundsFlowMarketDataField_MiddleSellAmount_get, _xmdapi.CTORATstpFundsFlowMarketDataField_MiddleSellAmount_set)
LargeBuyTurnover = property(_xmdapi.CTORATstpFundsFlowMarketDataField_LargeBuyTurnover_get, _xmdapi.CTORATstpFundsFlowMarketDataField_LargeBuyTurnover_set)
LargeBuyVolume = property(_xmdapi.CTORATstpFundsFlowMarketDataField_LargeBuyVolume_get, _xmdapi.CTORATstpFundsFlowMarketDataField_LargeBuyVolume_set)
LargeBuyAmount = property(_xmdapi.CTORATstpFundsFlowMarketDataField_LargeBuyAmount_get, _xmdapi.CTORATstpFundsFlowMarketDataField_LargeBuyAmount_set)
LargeSellTurnover = property(_xmdapi.CTORATstpFundsFlowMarketDataField_LargeSellTurnover_get, _xmdapi.CTORATstpFundsFlowMarketDataField_LargeSellTurnover_set)
LargeSellVolume = property(_xmdapi.CTORATstpFundsFlowMarketDataField_LargeSellVolume_get, _xmdapi.CTORATstpFundsFlowMarketDataField_LargeSellVolume_set)
LargeSellAmount = property(_xmdapi.CTORATstpFundsFlowMarketDataField_LargeSellAmount_get, _xmdapi.CTORATstpFundsFlowMarketDataField_LargeSellAmount_set)
InstitutionBuyTurnover = property(_xmdapi.CTORATstpFundsFlowMarketDataField_InstitutionBuyTurnover_get, _xmdapi.CTORATstpFundsFlowMarketDataField_InstitutionBuyTurnover_set)
InstitutionBuyVolume = property(_xmdapi.CTORATstpFundsFlowMarketDataField_InstitutionBuyVolume_get, _xmdapi.CTORATstpFundsFlowMarketDataField_InstitutionBuyVolume_set)
InstitutionBuyAmount = property(_xmdapi.CTORATstpFundsFlowMarketDataField_InstitutionBuyAmount_get, _xmdapi.CTORATstpFundsFlowMarketDataField_InstitutionBuyAmount_set)
InstitutionSellTurnover = property(_xmdapi.CTORATstpFundsFlowMarketDataField_InstitutionSellTurnover_get, _xmdapi.CTORATstpFundsFlowMarketDataField_InstitutionSellTurnover_set)
InstitutionSellVolume = property(_xmdapi.CTORATstpFundsFlowMarketDataField_InstitutionSellVolume_get, _xmdapi.CTORATstpFundsFlowMarketDataField_InstitutionSellVolume_set)
InstitutionSellAmount = property(_xmdapi.CTORATstpFundsFlowMarketDataField_InstitutionSellAmount_get, _xmdapi.CTORATstpFundsFlowMarketDataField_InstitutionSellAmount_set)
def __init__(self):
_xmdapi.CTORATstpFundsFlowMarketDataField_swiginit(self, _xmdapi.new_CTORATstpFundsFlowMarketDataField())
__swig_destroy__ = _xmdapi.delete_CTORATstpFundsFlowMarketDataField
# Register CTORATstpFundsFlowMarketDataField in _xmdapi:
_xmdapi.CTORATstpFundsFlowMarketDataField_swigregister(CTORATstpFundsFlowMarketDataField)
class CTORATstpFensUserInfoField(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
FensVer = property(_xmdapi.CTORATstpFensUserInfoField_FensVer_get, _xmdapi.CTORATstpFensUserInfoField_FensVer_set)
FensEnvID = property(_xmdapi.CTORATstpFensUserInfoField_FensEnvID_get, _xmdapi.CTORATstpFensUserInfoField_FensEnvID_set)
FensNodeID = property(_xmdapi.CTORATstpFensUserInfoField_FensNodeID_get, _xmdapi.CTORATstpFensUserInfoField_FensNodeID_set)
FensUserID = property(_xmdapi.CTORATstpFensUserInfoField_FensUserID_get, _xmdapi.CTORATstpFensUserInfoField_FensUserID_set)
UserID = property(_xmdapi.CTORATstpFensUserInfoField_UserID_get, _xmdapi.CTORATstpFensUserInfoField_UserID_set)
ClientInfo = property(_xmdapi.CTORATstpFensUserInfoField_ClientInfo_get, _xmdapi.CTORATstpFensUserInfoField_ClientInfo_set)
def __init__(self):
_xmdapi.CTORATstpFensUserInfoField_swiginit(self, _xmdapi.new_CTORATstpFensUserInfoField())
__swig_destroy__ = _xmdapi.delete_CTORATstpFensUserInfoField
# Register CTORATstpFensUserInfoField in _xmdapi:
_xmdapi.CTORATstpFensUserInfoField_swigregister(CTORATstpFensUserInfoField)
class CTORATstpConnectionInfoField(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
InnerIPAddress = property(_xmdapi.CTORATstpConnectionInfoField_InnerIPAddress_get, _xmdapi.CTORATstpConnectionInfoField_InnerIPAddress_set)
InnerPort = property(_xmdapi.CTORATstpConnectionInfoField_InnerPort_get, _xmdapi.CTORATstpConnectionInfoField_InnerPort_set)
OuterIPAddress = property(_xmdapi.CTORATstpConnectionInfoField_OuterIPAddress_get, _xmdapi.CTORATstpConnectionInfoField_OuterIPAddress_set)
OuterPort = property(_xmdapi.CTORATstpConnectionInfoField_OuterPort_get, _xmdapi.CTORATstpConnectionInfoField_OuterPort_set)
MacAddress = property(_xmdapi.CTORATstpConnectionInfoField_MacAddress_get, _xmdapi.CTORATstpConnectionInfoField_MacAddress_set)
def __init__(self):
_xmdapi.CTORATstpConnectionInfoField_swiginit(self, _xmdapi.new_CTORATstpConnectionInfoField())
__swig_destroy__ = _xmdapi.delete_CTORATstpConnectionInfoField
# Register CTORATstpConnectionInfoField in _xmdapi:
_xmdapi.CTORATstpConnectionInfoField_swigregister(CTORATstpConnectionInfoField)
class CTORATstpXMdSpi(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def OnFrontConnected(self):
return _xmdapi.CTORATstpXMdSpi_OnFrontConnected(self)
def OnFrontDisconnected(self, nReason):
return _xmdapi.CTORATstpXMdSpi_OnFrontDisconnected(self, nReason)
def OnRspGetConnectionInfo(self, pConnectionInfoField, pRspInfoField, nRequestID):
return _xmdapi.CTORATstpXMdSpi_OnRspGetConnectionInfo(self, pConnectionInfoField, pRspInfoField, nRequestID)
def OnRspUserLogin(self, pRspUserLoginField, pRspInfoField, nRequestID):
return _xmdapi.CTORATstpXMdSpi_OnRspUserLogin(self, pRspUserLoginField, pRspInfoField, nRequestID)
def OnRspUserLogout(self, pUserLogoutField, pRspInfoField, nRequestID):
return _xmdapi.CTORATstpXMdSpi_OnRspUserLogout(self, pUserLogoutField, pRspInfoField, nRequestID)
def OnRspSubMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspSubMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRspUnSubMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspUnSubMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRspSubPHMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspSubPHMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRspUnSubPHMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspUnSubPHMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRspSubSpecialMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspSubSpecialMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRspUnSubSpecialMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspUnSubSpecialMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRspSubSimplifyMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspSubSimplifyMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRspUnSubSimplifyMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspUnSubSimplifyMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRspSubSecurityStatus(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspSubSecurityStatus(self, pSpecificSecurityField, pRspInfoField)
def OnRspUnSubSecurityStatus(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspUnSubSecurityStatus(self, pSpecificSecurityField, pRspInfoField)
def OnRspSubMarketStatus(self, pSpecificMarketField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspSubMarketStatus(self, pSpecificMarketField, pRspInfoField)
def OnRspUnSubMarketStatus(self, pSpecificMarketField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspUnSubMarketStatus(self, pSpecificMarketField, pRspInfoField)
def OnRspSubImcParams(self, pSpecificMarketField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspSubImcParams(self, pSpecificMarketField, pRspInfoField)
def OnRspUnSubImcParams(self, pSpecificMarketField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspUnSubImcParams(self, pSpecificMarketField, pRspInfoField)
def OnRspInquiryMarketDataMirror(self, pMarketDataField, pRspInfoField, nRequestID, bIsLast):
return _xmdapi.CTORATstpXMdSpi_OnRspInquiryMarketDataMirror(self, pMarketDataField, pRspInfoField, nRequestID, bIsLast)
def OnRspInquiryPHMarketDataMirror(self, pPHMarketDataField, pRspInfoField, nRequestID, bIsLast):
return _xmdapi.CTORATstpXMdSpi_OnRspInquiryPHMarketDataMirror(self, pPHMarketDataField, pRspInfoField, nRequestID, bIsLast)
def OnRspInquirySpecialMarketDataMirror(self, pMarketDataField, pRspInfoField, nRequestID, bIsLast):
return _xmdapi.CTORATstpXMdSpi_OnRspInquirySpecialMarketDataMirror(self, pMarketDataField, pRspInfoField, nRequestID, bIsLast)
def OnRspSubSPMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspSubSPMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRspUnSubSPMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspUnSubSPMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRspSubSPSimplifyMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspSubSPSimplifyMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRspUnSubSPSimplifyMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspUnSubSPSimplifyMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRspSubSPSecurityStatus(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspSubSPSecurityStatus(self, pSpecificSecurityField, pRspInfoField)
def OnRspUnSubSPSecurityStatus(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspUnSubSPSecurityStatus(self, pSpecificSecurityField, pRspInfoField)
def OnRspSubSPMarketStatus(self, pSpecificMarketField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspSubSPMarketStatus(self, pSpecificMarketField, pRspInfoField)
def OnRspUnSubSPMarketStatus(self, pSpecificMarketField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspUnSubSPMarketStatus(self, pSpecificMarketField, pRspInfoField)
def OnRspInquirySPMarketDataMirror(self, pMarketDataField, pRspInfoField, nRequestID, bIsLast):
return _xmdapi.CTORATstpXMdSpi_OnRspInquirySPMarketDataMirror(self, pMarketDataField, pRspInfoField, nRequestID, bIsLast)
def OnRtnMarketData(self, pMarketDataField):
return _xmdapi.CTORATstpXMdSpi_OnRtnMarketData(self, pMarketDataField)
def OnRtnPHMarketData(self, pPHMarketDataField):
return _xmdapi.CTORATstpXMdSpi_OnRtnPHMarketData(self, pPHMarketDataField)
def OnRtnSpecialMarketData(self, pSpecialMarketDataField):
return _xmdapi.CTORATstpXMdSpi_OnRtnSpecialMarketData(self, pSpecialMarketDataField)
def OnRtnSimplifyMarketData(self, pSimplifyMarketDataField):
return _xmdapi.CTORATstpXMdSpi_OnRtnSimplifyMarketData(self, pSimplifyMarketDataField)
def OnRtnSecurityStatus(self, pSecurityStatusField):
return _xmdapi.CTORATstpXMdSpi_OnRtnSecurityStatus(self, pSecurityStatusField)
def OnRtnMarketStatus(self, pMarketStatusField):
return _xmdapi.CTORATstpXMdSpi_OnRtnMarketStatus(self, pMarketStatusField)
def OnRtnImcParams(self, pImcParamsField):
return _xmdapi.CTORATstpXMdSpi_OnRtnImcParams(self, pImcParamsField)
def OnRtnSPMarketData(self, pMarketDataField):
return _xmdapi.CTORATstpXMdSpi_OnRtnSPMarketData(self, pMarketDataField)
def OnRtnSPSimplifyMarketData(self, pSimplifyMarketDataField):
return _xmdapi.CTORATstpXMdSpi_OnRtnSPSimplifyMarketData(self, pSimplifyMarketDataField)
def OnRtnSPSecurityStatus(self, pSecurityStatusField):
return _xmdapi.CTORATstpXMdSpi_OnRtnSPSecurityStatus(self, pSecurityStatusField)
def OnRtnSPMarketStatus(self, pMarketStatusField):
return _xmdapi.CTORATstpXMdSpi_OnRtnSPMarketStatus(self, pMarketStatusField)
def OnRspSubRapidMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspSubRapidMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRspUnSubRapidMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspUnSubRapidMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRtnRapidMarketData(self, pRapidMarketDataField):
return _xmdapi.CTORATstpXMdSpi_OnRtnRapidMarketData(self, pRapidMarketDataField)
def OnRspSubFundsFlowMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspSubFundsFlowMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRspUnSubFundsFlowMarketData(self, pSpecificSecurityField, pRspInfoField):
return _xmdapi.CTORATstpXMdSpi_OnRspUnSubFundsFlowMarketData(self, pSpecificSecurityField, pRspInfoField)
def OnRtnFundsFlowMarketData(self, pFundsFlowMarketDataField):
return _xmdapi.CTORATstpXMdSpi_OnRtnFundsFlowMarketData(self, pFundsFlowMarketDataField)
def __init__(self):
if self.__class__ == CTORATstpXMdSpi:
_self = None
else:
_self = self
_xmdapi.CTORATstpXMdSpi_swiginit(self, _xmdapi.new_CTORATstpXMdSpi(_self, ))
__swig_destroy__ = _xmdapi.delete_CTORATstpXMdSpi
def __disown__(self):
self.this.disown()
_xmdapi.disown_CTORATstpXMdSpi(self)
return weakref.proxy(self)
# Register CTORATstpXMdSpi in _xmdapi:
_xmdapi.CTORATstpXMdSpi_swigregister(CTORATstpXMdSpi)
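# Illustrative usage sketch (not part of the generated bindings): subclass CTORATstpXMdSpi above to
# receive callbacks such as OnFrontConnected/OnRtnMarketData, then drive the connection through
# CTORATstpXMdApi below, e.g.
#   api = CTORATstpXMdApi.CreateTstpXMdApi()
#   api.RegisterFront("tcp://127.0.0.1:6402")   # placeholder address
#   api.Init()
#   api.Join()
# Registering the spi instance and subscribing to market data use API methods beyond this excerpt.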
class CTORATstpXMdApi(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
@staticmethod
def CreateTstpXMdApi(*args):
return _xmdapi.CTORATstpXMdApi_CreateTstpXMdApi(*args)
@staticmethod
def GetApiVersion():
return _xmdapi.CTORATstpXMdApi_GetApiVersion()
def Release(self):
return _xmdapi.CTORATstpXMdApi_Release(self)
def Init(self):
return _xmdapi.CTORATstpXMdApi_Init(self)
def Join(self):
return _xmdapi.CTORATstpXMdApi_Join(self)
def RegisterFront(self, pszFrontAddress):
return _xmdapi.CTORATstpXMdApi_RegisterFront(self, pszFrontAddress)
def RegisterNameServer(self, pszNsAddress):
return _xmdapi.CTORATstpXMdApi_RegisterNameServer(self, pszNsAddress)
def RegisterFensUserInfo(self, pFensUserInfoField):
return _xmdapi.CTORATstpXMdApi_RegisterFensUserInfo(self, pFensUserInfoField)
def RegisterMulticast(self, pszMulticastAddress, pszInterfaceIP, pszSourceIp):
return _xmdapi.CTORATstpXMdApi_RegisterMulticast(self, pszMulticastAddress, pszInterfaceIP, pszSourceIp)
def RegisterDeriveServer(self, pszDeriveAddress):
return _xmdapi.CTORATstpXMdApi_RegisterDeriveServer(self, pszDeriveAddress)
def RegisterDeriveMulticast(self, pszMulticastAddress, pszInterfaceIP, pszSourceIp):
| |
<filename>PyMRFLSSVM/Batch_MRF_Helpers.py
# -*- coding: utf-8 -*-
__author__ = 'spacegoing'
from CyInf.WllepGraphCut import Inf_Algo
from MrfTypes import Example, Options
import numpy as np
import scipy as sp
def phi_helper(ex, label_inferred, latent_inferred, options):
'''
:param ex:
:type ex: Example
:param options:
:type options: Options
:return:
:rtype:
'''
# unary_observed = instance.unary_observed
# pairwise = instance.pairwise
# labels = instance.y
# latent_var = instance.latent_var
# clique_indexes = instance.clique_indexes. Note: clique id starts from 1
#
# options = Options()
# unary phi
unary_phi = sum(ex.unary_observed[:, :, 0].flatten()[label_inferred.flatten() == 0]) + \
sum(ex.unary_observed[:, :, 1].flatten()[label_inferred.flatten() == 1])
# pairwise phi
pairwise_phi = 0
if ex.hasPairwise:
label = label_inferred.flatten()
for i1, i2, value in ex.pairwise:
if label[int(i1)] != label[int(i2)]:
pairwise_phi += value
# pairwise_phi = sum(label_inferred.flatten()[ex.pairwise[:, 0].astype(np.int)] !=
# label_inferred.flatten()[ex.pairwise[:, 1].astype(np.int)])
# higher order phi
higher_order_phi = np.zeros(2 * options.K - 1, dtype=np.double, order='C')
max_latent_index = [int(sum(latent_inferred[i, :])) for i in range(ex.numCliques)]
# clique index starts from 1
cliques_size = [sum(sum(ex.clique_indexes == i + 1)) for i in range(ex.numCliques)]
cliques_value = [sum(label_inferred.flatten()[ex.clique_indexes.flatten() == i + 1]) /
cliques_size[i] for i in range(ex.numCliques)]
higher_order_phi[0] = sum(cliques_value)
# 1 < i < K
for i in range(ex.numCliques):
        # if max_latent_index[i] == 0, the slice [1:1] below is empty
        # and nothing is accumulated for that clique
higher_order_phi[1:max_latent_index[i] + 1] += cliques_value[i]
# sum of [[ i-K <= k^* ]] by clique_index
# where k^* is the max_latent_index
db_z = np.sum(latent_inferred, axis=0)
# K <= i < 2K - 1
for i in range(options.K, 2 * options.K - 1):
higher_order_phi[i] = db_z[i - options.K]
phi = np.zeros(options.sizePhi, dtype=np.double, order='C')
phi[:options.sizeHighPhi] = higher_order_phi
phi[options.sizeHighPhi] = unary_phi
phi[options.sizeHighPhi + 1] = pairwise_phi
return phi
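# Added note (a reading of the code above) on the layout of the returned
# feature vector, where sizeHighPhi = 2K - 1 and sizePhi = 2K + 1:
#   phi[0]          sum over all cliques of the clique value W_c(y_c)
#   phi[1 : K]      phi[j] accumulates W_c(y_c) over cliques whose k* >= j
#   phi[K : 2K-1]   column sums of latent_inferred (db_z)
#   phi[2K-1]       unary feature
#   phi[2K]         pairwise feature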
def inf_latent_helper(ex, theta_full, options):
'''
:param ex:
:type ex: Example
:param theta_full:
:type theta_full:
:param options:
:type options: Options
:return:
:rtype:
'''
# np.double[:] theta_full contains unary & pairwise params
# Inf_Algo only accepts higher-order params
# # code for debugging
# theta_full = theta
# clique_indexes = instance.clique_indexes
# labels = instance.y
theta = theta_full[:options.sizeHighPhi]
    cliques_size = [sum(sum(ex.clique_indexes == i + 1)) for i in range(ex.numCliques)]  # clique index starts from 1
cliques_value = [sum(ex.y.flatten()[ex.clique_indexes.flatten() == i + 1]) /
cliques_size[i] for i in range(ex.numCliques)]
# # code for debugging
# cliques_unary_value = [sum(instance.unary_observed[:, :, 1].
# flatten()[clique_indexes.flatten() == i + 1]) / cliques_size[i]
# for i in range(options.numCliques)]
# a = np.reshape(cliques_value, [8, 8])
# b = np.reshape(cliques_unary_value, [8, 8])
inferred_latent = np.zeros([ex.numCliques, options.K - 1], dtype=np.int32, order='C')
for i in range(ex.numCliques):
for j in range(options.K - 1):
            # z_k = 1 only if (a_{k+1}-a_k)*W_c(y_c) + (b_{k+1}-b_k) < 0
inferred_latent[i][j] = 1 if (theta[1 + j] * cliques_value[i] +
theta[j + options.K] < 0) else 0
return inferred_latent
class Old_Option:
def __init__(self, rows, cols, numCliques,
K, hasPairwise, learningQP):
self.H = rows
self.W = cols
self.numCliques = numCliques
self.K = K
self.hasPairwise = hasPairwise
self.learningQP = learningQP
def inf_label_latent_helper(unary_observed, pairwise, clique_indexes, theta_full, options, hasPairwise=False):
'''
:param unary_observed:
:type unary_observed:
:param pairwise:
:type pairwise:
:param clique_indexes:
:type clique_indexes:
:param theta_full:
:type theta_full:
:param options:
:type options: Options
    :param hasPairwise: whether pairwise terms are used
    :type hasPairwise: bool
    :return: (inferred_label, inferred_z, e_i)
    :rtype: tuple
'''
# unary_observed = u
# pairwise = p
# clique_indexes = c
# theta_full = t
# hasPairwise = True
rows = unary_observed.shape[0]
cols = unary_observed.shape[1]
numCliques = len(np.unique(clique_indexes))
# np.double[:] theta_full contains unary & pairwise params
# Inf_Algo only accepts higher-order params
theta = theta_full[:options.sizeHighPhi]
# inferred_label & inferred_z are assigned inside Inf_Algo()
inferred_label = np.zeros([rows, cols], dtype=np.int32, order='C')
inferred_z = np.zeros([numCliques, options.K - 1], dtype=np.int32, order='C')
old_option = Old_Option(rows, cols, numCliques,
options.K, hasPairwise, 1)
# print("%d %d %d %d %d %d" % (old_option.H,
# old_option.W,
# old_option.numCliques,
# old_option.K,
# old_option.hasPairwise,
# old_option.learningQP))
e_i = Inf_Algo(unary_observed, pairwise, clique_indexes,
inferred_label, inferred_z, theta, old_option)
return inferred_label, inferred_z, e_i
def init_theta_concave(example, options):
    '''
    Initialize theta to encode a set of concave linear equations
    according to the training data "example".
    It first calculates W(y) for each clique, then determines how
    many distinct W(y) values exist (i.e. how many linear equations
    are needed). If options.K is smaller than that number, this
    function prints a warning and subsamples the unique values to
    fit options.K; consider increasing options.K and running again.
    It then samples a concave piecewise-linear function with one
    segment per estimated distinct W(y) value. Any extra linear
    functions (options.K > that number) are simply initialized as
    redundant functions.
:param example:
:type example: Example
:param options:
:type options: Options
:return:
'''
# clique index starts from 1
cliques_size = [sum(sum(example.clique_indexes == i + 1)) for i in range(example.numCliques)]
cliques_value = [sum(example.y.flatten()[example.clique_indexes.flatten() == i + 1]) /
cliques_size[i] for i in range(example.numCliques)]
unique_value_array = np.unique(cliques_value)
# Check if Current K < potentially best number
print("Potentially best K: %d" % unique_value_array.shape[0])
if options.K < unique_value_array.shape[0]:
print("Warning: Current K: %d < potentially best %d\n "
"unique_value_array is shortened to fit options.K \n"
"User may consider increase options.K"
% (options.K, unique_value_array.shape[0]))
shorten_indexes = [int(i) for i in
np.linspace(0, len(unique_value_array) - 1, options.K)]
unique_value_array = unique_value_array[shorten_indexes]
# print("Warning: Current K: %d < potentially best %d, please increase K then run again"
# % (options.K, unique_value_array.shape[0]))
# raise ValueError("see warning info")
# Mid points between unique values.
mid_points_array = np.zeros([unique_value_array.shape[0] - 1])
for i in range(1, unique_value_array.shape[0]):
mid_points_array[i - 1] = (unique_value_array[i - 1] + unique_value_array[i]) / 2
# sample a set of concave linear functions based on those points
# initialize a_b parameters matrix (a,b) and sampled points matrix (x,y)
a_b = np.zeros([options.K, 2])
sampled_points = np.zeros([mid_points_array.shape[0] + 2, 2])
sampled_points[1:mid_points_array.shape[0] + 1, 0] = mid_points_array
sampled_points[mid_points_array.shape[0] + 1, 0] = 1
if sampled_points.shape[0] < options.K + 1:
redund_points = np.zeros([options.K + 1 - sampled_points.shape[0], 2])
redund_points[:, 0] = np.linspace(1.1,
1 + 0.1 * (options.K + 1 - sampled_points.shape[0]),
options.K + 1 - sampled_points.shape[0])
sampled_points = np.r_[sampled_points, redund_points]
# Sample the first point
sampled_points[1, 1] = np.random.uniform(sampled_points[1, 0], 1, 1)[0]
a_b[0, 0] = (sampled_points[1, 1] - sampled_points[0, 1]) / \
(sampled_points[1, 0] - sampled_points[0, 0])
# Sample other points
for i in range(1, options.K):
up_bound = a_b[i - 1, 0] * sampled_points[i + 1, 0] + a_b[i - 1, 1] - 1e-9
sampled_points[i + 1, 1] = np.random.uniform(up_bound - 0.5, up_bound, 1)[0]
if (sampled_points[i + 1, 0] - sampled_points[i, 0]) != 0:
a_b[i, 0] = (sampled_points[i + 1, 1] - sampled_points[i, 1]) / \
(sampled_points[i + 1, 0] - sampled_points[i, 0])
a_b[i, 1] = sampled_points[i + 1, 1] - a_b[i, 0] * sampled_points[i + 1, 0]
else:
a_b[i, 0] = 0
a_b[i, 1] = sampled_points[i + 1, 1]
# encode a_b into theta
theta = [a_b[0, 0]]
# a_{k}-a{k-1}
for i in range(1, options.K):
theta.append(a_b[i, 0] - a_b[i - 1, 0])
# b{k}-b{k-1}
for i in range(1, options.K):
theta.append(a_b[i, 1] - a_b[i - 1, 1])
# unary, pairwise and slack
theta += [np.random.uniform(-1, 1, 1)[0]] + list(np.random.rand(1, 2)[0, :])
return np.asarray(theta, dtype=np.double, order='C')
def init_theta_exlarge(options, first_a=1e6, dec=1e2):
'''
:param options:
:type options: Options
:return:
:rtype:
'''
    intervals = np.linspace(0, 1, options.K + 1)
a_b_array = np.zeros([options.K, 2], dtype=np.double)
a_b_array[0, 0] = first_a
for i in range(1, options.K):
a_b_array[i, 0] = a_b_array[i - 1, 0] - dec
inter_point_y = a_b_array[i - 1, 0] * intervals[i] \
+ a_b_array[i - 1, 1]
a_b_array[i, 1] = inter_point_y - a_b_array[i, 0] * intervals[i]
theta = np.zeros(options.sizePhi + 1, dtype=np.double)
theta[0] = a_b_array[0, 0]
for i in range(1, options.K):
theta[i] = a_b_array[i, 0] - a_b_array[i - 1, 0]
theta[i + options.K - 1] = a_b_array[i, 1] - a_b_array[i - 1, 1]
theta[-3] = 1e-21
theta[-2] = 1
return theta
def remove_redundancy_theta(theta, options, eps=1e-5):
'''
:param theta:
:type theta: np.ndarray
:param options:
:type options:Options
:param eps:
:return:
'''
def intersect(a_1, b_1, a_2, b_2, func_idx, i):
if a_1 - a_2 == 0:
print(theta)
            raise ValueError('Parallel lines (a_1 == a_2), no unique intersection!\ntheta: %d and %d' % (func_idx, i))
x = (b_2 - b_1) / (a_1 - a_2)
y = (a_1 * b_2 - a_2 * b_1) / (a_1 - a_2)
# Can't exceed 1
if x > 1:
x = 1
            y = a_1 + b_1
        return x, y
<filename>imperative/python/test/unit/random/test_rng.py
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import Tensor
from megengine.core._imperative_rt import CompNode
from megengine.core._imperative_rt.core2 import apply
from megengine.core._imperative_rt.ops import (
delete_rng_handle,
get_global_rng_seed,
new_rng_handle,
)
from megengine.core.ops.builtin import (
BetaRNG,
GammaRNG,
GaussianRNG,
PermutationRNG,
PoissonRNG,
UniformRNG,
)
from megengine.device import get_device_count
from megengine.random import RNG, seed, uniform
@pytest.mark.skipif(
get_device_count("xpu") <= 2, reason="xpu counts need > 2",
)
def test_gaussian_op():
shape = (
8,
9,
11,
12,
)
shape = Tensor(shape, dtype="int32")
op = GaussianRNG(seed=get_global_rng_seed(), mean=1.0, std=3.0, dtype="float32")
(output,) = apply(op, shape)
assert np.fabs(output.numpy().mean() - 1.0) < 1e-1
assert np.fabs(np.sqrt(output.numpy().var()) - 3.0) < 1e-1
assert str(output.device) == str(CompNode("xpux"))
assert output.dtype == np.float32
cn = CompNode("xpu2")
seed = 233333
h = new_rng_handle(cn, seed)
op = GaussianRNG(seed=seed, mean=3.0, std=1.0, dtype="float32", handle=h)
(output,) = apply(op, shape)
delete_rng_handle(h)
assert np.fabs(output.numpy().mean() - 3.0) < 1e-1
assert np.fabs(np.sqrt(output.numpy().var()) - 1.0) < 1e-1
assert str(output.device) == str(cn)
assert output.dtype == np.float32
@pytest.mark.skipif(
get_device_count("xpu") <= 2, reason="xpu counts need > 2",
)
def test_uniform_op():
shape = (
8,
9,
11,
12,
)
shape = Tensor(shape, dtype="int32")
op = UniformRNG(seed=get_global_rng_seed(), dtype="float32")
(output,) = apply(op, shape)
assert np.fabs(output.numpy().mean() - 0.5) < 1e-1
assert str(output.device) == str(CompNode("xpux"))
assert output.dtype == np.float32
cn = CompNode("xpu2")
seed = 233333
h = new_rng_handle(cn, seed)
op = UniformRNG(seed=seed, dtype="float32", handle=h)
(output,) = apply(op, shape)
delete_rng_handle(h)
assert np.fabs(output.numpy().mean() - 0.5) < 1e-1
assert str(output.device) == str(cn)
assert output.dtype == np.float32
@pytest.mark.skipif(
get_device_count("xpu") <= 2, reason="xpu counts need > 2",
)
def test_gamma_op():
_shape, _scale = 2, 0.8
_expected_mean, _expected_std = _shape * _scale, np.sqrt(_shape) * _scale
shape = F.full([8, 9, 11, 12], value=_shape, dtype="float32")
scale = F.full([8, 9, 11, 12], value=_scale, dtype="float32")
op = GammaRNG(seed=get_global_rng_seed(), handle=0)
(output,) = apply(op, shape, scale)
assert np.fabs(output.numpy().mean() - _expected_mean) < 1e-1
assert np.fabs(np.sqrt(output.numpy().var()) - _expected_std) < 1e-1
assert str(output.device) == str(CompNode("xpux"))
cn = CompNode("xpu2")
seed = 233333
h = new_rng_handle(cn, seed)
shape = F.full([8, 9, 11, 12], value=_shape, dtype="float32", device="xpu2")
scale = F.full([8, 9, 11, 12], value=_scale, dtype="float32", device="xpu2")
op = GammaRNG(seed=seed, handle=h)
(output,) = apply(op, shape, scale)
delete_rng_handle(h)
assert np.fabs(output.numpy().mean() - _expected_mean) < 1e-1
assert np.fabs(np.sqrt(output.numpy().var()) - _expected_std) < 1e-1
assert str(output.device) == str(cn)
@pytest.mark.skipif(
get_device_count("xpu") <= 2, reason="xpu counts need > 2",
)
def test_beta_op():
_alpha, _beta = 2, 0.8
_expected_mean = _alpha / (_alpha + _beta)
_expected_std = np.sqrt(
_alpha * _beta / ((_alpha + _beta) ** 2 * (_alpha + _beta + 1))
)
alpha = F.full([8, 9, 11, 12], value=_alpha, dtype="float32")
beta = F.full([8, 9, 11, 12], value=_beta, dtype="float32")
op = BetaRNG(seed=get_global_rng_seed())
(output,) = apply(op, alpha, beta)
assert np.fabs(output.numpy().mean() - _expected_mean) < 1e-1
assert np.fabs(np.sqrt(output.numpy().var()) - _expected_std) < 1e-1
assert str(output.device) == str(CompNode("xpux"))
cn = CompNode("xpu2")
seed = 233333
h = new_rng_handle(cn, seed)
alpha = F.full([8, 9, 11, 12], value=_alpha, dtype="float32", device=cn)
beta = F.full([8, 9, 11, 12], value=_beta, dtype="float32", device=cn)
op = BetaRNG(seed=seed, handle=h)
(output,) = apply(op, alpha, beta)
delete_rng_handle(h)
assert np.fabs(output.numpy().mean() - _expected_mean) < 1e-1
assert np.fabs(np.sqrt(output.numpy().var()) - _expected_std) < 1e-1
assert str(output.device) == str(cn)
@pytest.mark.skipif(
get_device_count("xpu") <= 2, reason="xpu counts need > 2",
)
def test_poisson_op():
lam = F.full([8, 9, 11, 12], value=2, dtype="float32")
op = PoissonRNG(seed=get_global_rng_seed())
(output,) = apply(op, lam)
assert np.fabs(output.numpy().mean() - 2.0) < 1e-1
assert np.fabs(np.sqrt(output.numpy().var()) - np.sqrt(2.0)) < 1e-1
assert str(output.device) == str(CompNode("xpux"))
cn = CompNode("xpu2")
seed = 233333
h = new_rng_handle(cn, seed)
lam = F.full([8, 9, 11, 12], value=2, dtype="float32", device=cn)
op = PoissonRNG(seed=seed, handle=h)
(output,) = apply(op, lam)
delete_rng_handle(h)
assert np.fabs(output.numpy().mean() - 2.0) < 1e-1
assert np.fabs(np.sqrt(output.numpy().var()) - np.sqrt(2.0)) < 1e-1
assert str(output.device) == str(cn)
@pytest.mark.skipif(
get_device_count("xpu") <= 2, reason="xpu counts need > 2",
)
def test_permutation_op():
n = 1000
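    # Added note: a random permutation of range(n) should leave far fewer than
    # half of its elements as fixed points, while sorting it must recover
    # range(n) exactly; sum_result in the helper below counts those fixed points.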
def test_permutation_op_dtype(dtype):
def sum_result(res, fun):
return sum([1 if i == v else 0 for i, v in enumerate(fun(res.numpy()))])
shape = Tensor((n,), dtype="int32")
op = PermutationRNG(seed=get_global_rng_seed(), dtype=dtype)
(output,) = apply(op, shape)
assert sum_result(output, lambda x: x) < 500
assert sum_result(output, np.sort) == n
assert str(output.device) == str(CompNode("xpux"))
assert output.dtype == dtype
cn = CompNode("xpu2")
seed = 233333
h = new_rng_handle(cn, seed)
op = PermutationRNG(seed=seed, handle=h, dtype=dtype)
(output,) = apply(op, shape)
delete_rng_handle(h)
assert sum_result(output, lambda x: x) < 500
assert sum_result(output, np.sort) == n
assert str(output.device) == str(cn)
assert output.dtype == dtype
test_permutation_op_dtype(np.float32)
test_permutation_op_dtype(np.int32)
test_permutation_op_dtype(np.int16)
@pytest.mark.skipif(
get_device_count("xpu") <= 1, reason="xpu counts need > 1",
)
def test_UniformRNG():
m1 = RNG(seed=111, device="xpu0")
m2 = RNG(seed=111, device="xpu1")
m3 = RNG(seed=222, device="xpu0")
out1 = m1.uniform(size=(100,))
out1_ = m1.uniform(size=(100,))
out2 = m2.uniform(size=(100,))
out3 = m3.uniform(size=(100,))
np.testing.assert_equal(out1.numpy(), out2.numpy())
assert out1.device == "xpu0" and out2.device == "xpu1"
assert not (out1.numpy() == out3.numpy()).all()
assert not (out1.numpy() == out1_.numpy()).all()
low = -234
high = 123
out = m1.uniform(low=low, high=high, size=(20, 30, 40))
out_shp = out.shape
if isinstance(out_shp, tuple):
assert out_shp == (20, 30, 40)
else:
assert all(out.shape.numpy() == np.array([20, 30, 40]))
assert np.abs(out.mean().numpy() - ((low + high) / 2)) / (high - low) < 0.1
@pytest.mark.skipif(
get_device_count("xpu") <= 1, reason="xpu counts need > 1",
)
def test_NormalRNG():
m1 = RNG(seed=111, device="xpu0")
m2 = RNG(seed=111, device="xpu1")
m3 = RNG(seed=222, device="xpu0")
out1 = m1.normal(size=(100,))
out1_ = m1.uniform(size=(100,))
out2 = m2.normal(size=(100,))
out3 = m3.normal(size=(100,))
np.testing.assert_equal(out1.numpy(), out2.numpy())
assert out1.device == "xpu0" and out2.device == "xpu1"
assert not (out1.numpy() == out3.numpy()).all()
assert not (out1.numpy() == out1_.numpy()).all()
mean = -1
std = 2
out = m1.normal(mean=mean, std=std, size=(20, 30, 40))
out_shp = out.shape
if isinstance(out_shp, tuple):
assert out_shp == (20, 30, 40)
else:
assert all(out.shape.numpy() == np.array([20, 30, 40]))
assert np.abs(out.mean().numpy() - mean) / std < 0.1
assert np.abs(np.std(out.numpy()) - std) < 0.1
@pytest.mark.skipif(
get_device_count("xpu") <= 1, reason="xpu counts need > 1",
)
def test_GammaRNG():
m1 = RNG(seed=111, device="xpu0")
m2 = RNG(seed=111, device="xpu1")
m3 = RNG(seed=222, device="xpu0")
out1 = m1.gamma(2, size=(100,))
out1_ = m1.uniform(size=(100,))
out2 = m2.gamma(2, size=(100,))
out3 = m3.gamma(2, size=(100,))
np.testing.assert_equal(out1.numpy(), out2.numpy())
assert out1.device == "xpu0" and out2.device == "xpu1"
assert not (out1.numpy() == out3.numpy()).all()
assert not (out1.numpy() == out1_.numpy()).all()
shape = Tensor([[2, 3, 4], [9, 10, 11]], dtype=np.float32, device="xpu0")
scale = Tensor([0.5, 1, 1.5], dtype=np.float32, device="xpu0")
expected_mean = (shape * scale).numpy()
expected_std = (F.sqrt(shape) * scale).numpy()
out = m1.gamma(shape=shape, scale=scale, size=(20, 30, 40))
out_shp = out.shape
if isinstance(out_shp, tuple):
assert out_shp == (20, 30, 40, 2, 3)
else:
assert all(out.shape.numpy() == np.array([20, 30, 40, 2, 3]))
assert (
np.abs(out.mean(axis=(0, 1)).numpy() - expected_mean) / expected_std
).mean() < 0.1
assert (np.abs(np.std(out.numpy(), axis=(0, 1)) - expected_std)).mean() < 0.1
@pytest.mark.skipif(
get_device_count("xpu") <= 1, reason="xpu counts need > 1",
)
def test_BetaRNG():
m1 = RNG(seed=111, device="xpu0")
m2 = RNG(seed=111, device="xpu1")
m3 = RNG(seed=222, device="xpu0")
out1 = m1.beta(2, 1, size=(100,))
out1_ = m1.uniform(size=(100,))
out2 = m2.beta(2, 1, size=(100,))
out3 = m3.beta(2, 1, size=(100,))
np.testing.assert_equal(out1.numpy(), out2.numpy())
assert out1.device == "xpu0" and out2.device == "xpu1"
assert not (out1.numpy() == out3.numpy()).all()
assert not (out1.numpy() == out1_.numpy()).all()
alpha = Tensor([[2, 3, 4], [9, 10, 11]], dtype=np.float32, device="xpu0")
beta = Tensor([0.5, 1, 1.5], dtype=np.float32, device="xpu0")
expected_mean = (alpha / (alpha + beta)).numpy()
expected_std = (
F.sqrt(alpha * beta / (F.pow(alpha + beta, 2) * (alpha + beta + 1)))
).numpy()
out = m1.beta(alpha=alpha, beta=beta, size=(20, 30))
out_shp = out.shape
if isinstance(out_shp, tuple):
assert out_shp == (20, 30, 2, 3)
else:
assert all(out.shape.numpy() == np.array([20, 30, 2, 3]))
assert (
np.abs(out.mean(axis=(0, 1)).numpy() - expected_mean) / expected_std
).mean() < 0.1
assert (np.abs(np.std(out.numpy(), axis=(0, 1)) - expected_std)).mean() < 0.1
@pytest.mark.skipif(
get_device_count("xpu") <= 1, reason="xpu counts need > 1",
)
def test_PoissonRNG():
m1 = RNG(seed=111, device="xpu0")
m2 = RNG(seed=111, device="xpu1")
m3 = RNG(seed=222, device="xpu0")
lam = Tensor([[2, 3, 4], [9, 10, 11]], dtype=np.float32)
out1 = m1.poisson(lam.to("xpu0"), size=(100,))
out2 = m2.poisson(lam.to("xpu1"), size=(100,))
out3 = m3.poisson(lam.to("xpu0"), size=(100,))
np.testing.assert_equal(out1.numpy(), out2.numpy())
assert out1.device == "xpu0" and out2.device == "xpu1"
assert not (out1.numpy() == out3.numpy()).all()
out = m1.poisson(lam.to("xpu0"), size=(20, 30))
out_shp = out.shape
expected_shape = (20, 30) + lam._tuple_shape
if isinstance(out_shp, tuple):
assert out_shp == expected_shape
else:
assert all(out.shape.numpy() == np.array(expected_shape))
lam = lam.numpy()
assert (np.abs(out.mean(axis=(0, 1)).numpy() - lam) / np.sqrt(lam)).mean() < 0.1
    assert (np.abs(np.std(out.numpy(), axis=(0, 1)) - np.sqrt(lam))).mean() < 0.1
Set exit code integer (default: 23)
:param quiet: Quiet mode, if True no console output (default: False)
:return: IPv4 address string (example: '192.168.1.254') or None in case of error
"""
if interface_name in self._network_interfaces_settings.keys():
if self._network_interfaces_settings[interface_name]['ipv4-gateway'] is not None:
return self._network_interfaces_settings[interface_name]['ipv4-gateway']
return self.get_interface_gateway(interface_name=interface_name,
network_type=AF_INET,
exit_on_failure=exit_on_failure,
exit_code=exit_code,
quiet=quiet)
def get_interface_ipv6_gateway(self,
interface_name: str = 'eth0',
exit_on_failure: bool = True,
exit_code: int = 24,
quiet: bool = False) -> Union[None, str]:
"""
Get IPv6 gateway address on network interface
:param interface_name: Network interface name (default: 'eth0')
        :param exit_on_failure: Exit in case of error (default: True)
:param exit_code: Set exit code integer (default: 24)
:param quiet: Quiet mode, if True no console output (default: False)
:return: IPv6 address string (example: 'fd00::1') or None in case of error
"""
if interface_name in self._network_interfaces_settings.keys():
if self._network_interfaces_settings[interface_name]['ipv6-gateway'] is not None:
return self._network_interfaces_settings[interface_name]['ipv6-gateway']
return self.get_interface_gateway(interface_name=interface_name,
network_type=AF_INET6,
exit_on_failure=exit_on_failure,
exit_code=exit_code,
quiet=quiet)
def add_multicast_mac_address(self,
interface_name: str = 'eth0',
multicast_mac_address: str = '33:33:00:00:00:02',
exit_on_failure: bool = True,
exit_code: int = 24,
quiet: bool = False) -> bool:
"""
Add Multicast MAC address on network interface
:param interface_name: Network interface name (default: 'eth0')
:param multicast_mac_address: Multicast MAC address (example: '33:33:00:00:00:02')
        :param exit_on_failure: Exit in case of error (default: True)
:param exit_code: Set exit code integer (default: 24)
:param quiet: Quiet mode, if True no console output (default: False)
:return: True if success or False if error
"""
if interface_name in self._network_interfaces_multicast_macs.keys():
if multicast_mac_address in self._network_interfaces_multicast_macs[interface_name]:
return True
else:
self._network_interfaces_multicast_macs[interface_name]: List[str] = list()
try:
# region Windows
if self.get_platform().startswith('Windows'):
pass
# endregion
# region MacOS
elif self.get_platform().startswith('Darwin'):
self._network_interfaces_multicast_macs[interface_name].append(multicast_mac_address)
return True
# endregion
# region Linux
elif self.get_platform().startswith('Linux'):
mcast_addresses = sub.run(['ip maddress show ' + interface_name],
shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
mcast_addresses = mcast_addresses.stdout.decode('utf-8')
if multicast_mac_address in mcast_addresses:
self._network_interfaces_multicast_macs[interface_name].append(multicast_mac_address)
else:
add_mcast_address = sub.run(['ip maddress add ' + multicast_mac_address + ' dev ' + interface_name],
shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
add_mcast_address = add_mcast_address.stdout.decode('utf-8')
assert add_mcast_address == '', \
                        'Could not add multicast MAC address: ' + self.error_text(multicast_mac_address) + \
' on interface: ' + self.error_text(interface_name)
self._network_interfaces_multicast_macs[interface_name].append(multicast_mac_address)
if not quiet:
                        self.print_info('Added multicast MAC address: ', multicast_mac_address,
' on interface: ', interface_name)
return True
# endregion
else:
assert False, 'Your platform: ' + self.error_text(self.get_platform()) + ' is not supported!'
except AssertionError as Error:
if not quiet:
self.print_error(Error.args[0])
if exit_on_failure:
exit(exit_code)
return False
# endregion
# region Check installed software
def apt_list_installed_packages(self,
exit_on_failure: bool = True,
exit_code: int = 25,
quiet: bool = False) -> Union[None, bytes]:
"""
Get output of bash command: apt list --installed
        :param exit_on_failure: Exit in case of error (default: True)
:param exit_code: Set exit code integer (default: 25)
:param quiet: Quiet mode, if True no console output (default: False)
:return: result bytes
"""
try:
apt_list_command = sub.Popen(['apt list --installed'], shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
apt_list_out, apt_list_err = apt_list_command.communicate()
assert apt_list_out is not None, \
'Something else went wrong while trying to run command: ' + \
self.error_text('`apt list --installed`')
self.os_installed_packages_list = apt_list_out
return apt_list_out
except OSError:
error_text = 'Something else went wrong while trying to run command: ' + \
self.error_text('`apt list --installed`')
except AssertionError as Error:
error_text = Error.args[0]
if not quiet:
self.print_error(error_text)
if exit_on_failure:
exit(exit_code)
return None
def check_installed_software(self,
software_name: str = 'apache2',
exit_on_failure: bool = True,
exit_code: int = 26,
quiet: bool = False) -> bool:
"""
Check software is installed or not
:param software_name: Name of software (default: 'apache2')
        :param exit_on_failure: Exit in case of error (default: True)
:param exit_code: Set exit code integer (default: 26)
:param quiet: Quiet mode, if True no console output (default: False)
:return: True or False
"""
try:
assert self.check_platform(exit_on_failure=exit_on_failure,
exit_code=exit_code,
quiet=quiet), \
'This is not a Linux platform'
            assert ('Kali' in linux_distribution()
                    or 'Debian' in linux_distribution()
                    or 'Ubuntu' in linux_distribution()), \
                'Unable to verify OS installed software. ' + \
                'This function works normally only in Debian, Ubuntu or Kali linux.'
if self.os_installed_packages_list is None:
self.apt_list_installed_packages(exit_on_failure)
assert self.os_installed_packages_list is not None, 'Unable to verify OS installed software.'
if software_name.encode(encoding='utf-8') in self.os_installed_packages_list:
return True
else:
if isfile('/bin/' + software_name) or isfile('/sbin/' + software_name) or \
isfile('/usr/bin/' + software_name) or isfile('/usr/sbin/' + software_name) or \
isfile('/usr/local/bin/' + software_name) or isfile('/usr/local/sbin/' + software_name):
return True
else:
return False
except AssertionError as Error:
error_text = Error.args[0]
if 'Debian, Ubuntu or Kali linux' in error_text:
if not quiet:
self.print_warning(error_text)
if isfile('/bin/' + software_name) or isfile('/sbin/' + software_name) or \
isfile('/usr/bin/' + software_name) or isfile('/usr/sbin/' + software_name) or \
isfile('/usr/local/bin/' + software_name) or isfile('/usr/local/sbin/' + software_name):
return True
else:
return False
else:
if not quiet:
self.print_error(error_text)
if exit_on_failure:
exit(exit_code)
return False
# endregion
# region Process control functions
@staticmethod
def check_process(process_name: str = 'systemd') -> int:
"""
Check process is running
:param process_name: Process name string (default: 'systemd')
:return: Process ID integer (example: 1)
"""
for process in ps.process_iter():
if 'python' in process.name():
for argument in process.cmdline():
if process_name in argument:
return int(process.pid)
if process.name() == process_name:
return int(process.pid)
return -1
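    # Illustrative call (hypothetical instance and process name):
    #     pid = base.check_process('sshd')   # -> e.g. 812, or -1 if not found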
def get_process_pid(self, process_name: str = 'systemd') -> int:
"""
Get process ID
:param process_name: Process name string (default: 'apache2')
:return: Process ID integer (example: 1234)
"""
return self.check_process(process_name)
def get_process_pid_by_listen_port(self,
listen_port: int = 80,
listen_address: Union[None, str] = None,
listen_proto: Union[None, str] = None,
exit_on_failure: bool = True,
exit_code: int = 27,
quiet: bool = False) -> Union[None, List[int]]:
"""
Get list of processes ID by listen TCP or UDP port
:param listen_port: Listening TCP or UDP port integer (default: 80)
:param listen_address: Listening IPv4 or IPv6 address string (default: None)
        :param listen_proto: Listening protocol string 'tcp' or 'udp' (default: None, treated as 'tcp')
        :param exit_on_failure: Exit in case of error (default: True)
:param exit_code: Set exit code integer (default: 27)
:param quiet: Quiet mode, if True no console output (default: False)
:return: List of processes ID by listen TCP or UDP port
"""
pids: List[int] = list()
try:
            assert 1 <= listen_port <= 65535, \
'Bad listen port: ' + self.error_text(str(listen_port)) + \
' listen port must be in range: ' + self.info_text('1 - 65535')
assert (listen_proto is None or listen_proto == 'tcp' or listen_proto == 'udp'), \
'Bad value in listen proto: ' + self.error_text(str(listen_proto)) + \
' listen proto must be ' + self.info_text('None' + ' or ' + '\'tcp\'' + ' or ' + '\'udp\'')
if listen_proto is None:
listen_proto = 'tcp'
for process in ps.process_iter():
connections = process.connections()
for connection in connections:
(address, port) = connection.laddr
if connection.type == sock.SOCK_STREAM and connection.status == ps.CONN_LISTEN:
proto = 'tcp'
elif connection.type == sock.SOCK_DGRAM:
proto = 'udp'
else:
continue
if listen_address is not None:
if address == listen_address and proto == listen_proto \
and port == listen_port and process.pid is not None:
pids.append(process.pid)
else:
if proto == listen_proto and port == listen_port and process.pid is not None:
pids.append(process.pid)
return pids
except ps.NoSuchProcess:
return pids
except AssertionError as Error:
if not quiet:
self.print_error(Error.args[0])
if exit_on_failure:
exit(exit_code)
return None
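    # Illustrative call (hypothetical instance): PIDs bound to TCP port 8080:
    #     pids = base.get_process_pid_by_listen_port(listen_port=8080,
    #                                                listen_proto='tcp')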
def kill_process(self, process_pid: int) -> bool:
"""
Kill process by ID
:param process_pid: Process ID integer
:return: True if kill process or False if not
"""
try:
if self.get_platform().startswith('Windows'):
sub.check_output('taskkill /F /PID ' + str(process_pid), shell=True)
else:
process = ps.Process(process_pid)
process.terminate()
return True
except ps.NoSuchProcess:
return False
def kill_process_by_name(self, process_name: str = 'apache2') -> bool:
"""
Kill process by name
:param process_name: Process name string (default: apache2)
:return: True if kill process or False if not
"""
if self.get_platform().startswith('Windows'):
sub.check_output('taskkill /F /IM ' + process_name, shell=True)
return True
else:
            process_pid = self.get_process_pid(process_name)
            if process_pid != -1:
                # re-resolve the PID on each pass; new processes with the same
                # name may appear while earlier ones are being terminated
                while process_pid != -1:
                    self.kill_process(process_pid)
                    process_pid = self.get_process_pid(process_name)
                return True
            else:
                return False
def kill_processes_by_listen_port(self,
listen_port: int = 80,
listen_address: Union[None, str] = None,
listen_proto: str = 'tcp') -> bool:
"""
Kill processes by listen TCP or UDP port
:param listen_port: Listening TCP or UDP port integer (default: 80)
:param listen_address: Listening IPv4 or IPv6 address string (default: None)
:param listen_proto: Listening protocol string 'tcp' or 'udp' (default: 'tcp')
:return: True if kill all processes | |
<reponame>tIoImIcIaItI/linux-server-configuration<gh_stars>0
import logging
import sys
import json
import random
import string
import httplib2
import requests
from flask import Flask, flash, jsonify, make_response, redirect, \
render_template, request, session as login_session, url_for
from oauth2client.client import FlowExchangeError, flow_from_clientsecrets
from oauth2client.client import OAuth2WebServerFlow
from models import session, Category, Item
from permissions import Permissions
from users import UserUtils
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
app = Flask(__name__)
# -----------------------------------------------------------------------------
# USERS, AUTHENTICATION, and AUTHORIZATION
# TODO: finish moving all environment-sensitive values out of app code
config = json.loads(open('/webapps/catalog/client_secrets.json', 'r').read())
CLIENT_ID = config['web']['client_id']
CLIENT_SECRET = config['web']['client_secret']
APPLICATION_NAME = "Item Catalog"
@app.route(
'/gconnect',
methods=['POST'])
def gconnect():
"""
Initiates user authentication via Google
"""
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = OAuth2WebServerFlow(
client_id=CLIENT_ID, client_secret=CLIENT_SECRET,
scope='https://www.googleapis.com/auth/userinfo.profile')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and \
gplus_id == stored_gplus_id and \
'user_id' in login_session:
return UserUtils.respond_with_preauthentication_url()
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
print data
# Add user data fields to the session dictionary
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
# Either create or retrieve the associated User
# from the data store, by unique email
user = UserUtils.try_get_user_by_email(data['email'])
if not user:
user = UserUtils.create_user(login_session)
user_id = user.id
login_session['user_id'] = user_id
handle = UserUtils.get_user_handle()
flash("you are now logged in as %s" % handle)
print "done!"
return UserUtils.respond_with_preauthentication_url()
@app.route(
'/gdisconnect')
def gdisconnect():
"""
Revokes a current Google user's token and resets their login_session
"""
UserUtils.set_preauthentication_url()
if 'access_token' not in login_session:
return UserUtils.return_to_preauthentication_url()
access_token = login_session['access_token']
    print 'In gdisconnect access token is %s' % access_token
print 'User name is: '
print login_session['username']
if access_token is None:
print 'Access Token is None'
response = make_response(
json.dumps('Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % \
login_session['access_token']
h = httplib2.Http()
result = h.request(url, 'GET')[0]
print 'result is '
print result
if result['status'] != '200':
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
del login_session['access_token']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['picture']
del login_session['user_id']
return UserUtils.return_to_preauthentication_url()
@app.route(
'/login')
def get_login_page():
"""
Presents the user with the available authentication services, and
creates a unique token
"""
UserUtils.set_preauthentication_url()
# Create anti-forgery state token
state = ''.join(random.choice(string.ascii_uppercase + string.digits)
for _ in range(32))
login_session['state'] = state
return render_template(
'login.html',
STATE=state)
# -----------------------------------------------------------------------------
# CATEGORY HTML ENDPOINTS
def extract_and_validate_category_name(form):
"""
Returns the value and validaton error of the
category name contained in the form
:rtype: (String, String)
"""
name = form.get('name')
name_error = None
if not name:
name_error = 'Name is required'
elif len(name) < 2 or len(name) > 80:
name_error = 'Name must be between 2 and 80 characters'
return name, name_error
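# Illustrative calls (hypothetical form data; any mapping with .get() works):
#     extract_and_validate_category_name({'name': 'Books'})  # ('Books', None)
#     extract_and_validate_category_name({})                 # (None, 'Name is required')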
@app.route(
'/categories/create',
methods=['GET', 'POST'])
def create_category():
"""
HTML endpoint providing a form to create a new category
"""
if not UserUtils.is_authenticated():
UserUtils.set_preauthentication_url()
flash('sign in to create categories')
return redirect('/login')
if request.method == 'POST':
# Extract and validate the form inputs
(name, name_error) = \
extract_and_validate_category_name(request.form)
if name_error:
return UserUtils.render_user_template(
'category_create.html',
page_title="New Category",
name=name, name_error=name_error)
# Create the item in the data store
item = Category(
name=name,
user_id=UserUtils.get_authenticated_user_id())
session.add(item)
session.commit()
flash('category created')
return redirect(url_for(
'get_category_by_id',
category_id=item.id))
else:
return UserUtils.render_user_template(
'category_create.html',
page_title="New Category")
@app.route('/')
@app.route('/categories/')
def get_categories():
"""
HTML endpoint providing a list of all categories
"""
try:
items = session.query(Category).all()
return UserUtils.render_user_template(
'category_list.html',
categories=items,
page_title="Category List")
except Exception as ex:
logging.exception("get_categories")
return "%s %s" % (ex.message, ex.args)
@app.route(
'/categories/<int:category_id>/')
def get_category_by_id(category_id):
"""
HTML endpoint providing details for a given category
"""
category = session.query(Category).filter_by(id=category_id).one()
items = session.query(Item).filter_by(category_id=category_id).all()
return UserUtils.render_user_template(
'category_items.html',
category=category,
items=items,
page_title="%s Category" % category.name,
can=Permissions.get_user_permissions_for_category(category))
@app.route(
'/categories/<int:category_id>/update',
methods=['GET', 'POST'])
def update_category_by_id(category_id):
"""
HTML endpoint providing a form to edit a category
"""
if not UserUtils.is_authenticated():
UserUtils.set_preauthentication_url()
flash('sign in to edit categories')
return redirect('/login')
category = session.query(Category).filter_by(id=category_id).one()
if not Permissions.get_user_permissions_for_category(category).update:
flash('you may edit only categories you created')
return redirect(url_for(
'get_categories'))
if request.method == 'POST':
# Extract and validate the form inputs
(name, name_error) = \
extract_and_validate_category_name(request.form)
if name_error:
return UserUtils.render_user_template(
'category_update.html',
category=category,
page_title="%s %s Category" % ("Edit", category.name),
name=name,
name_error=name_error)
# Create the item in the data store
category.name = name
session.add(category)
session.commit()
flash('category updated')
return redirect(url_for(
'get_category_by_id',
category_id=category_id))
else:
return UserUtils.render_user_template(
'category_update.html',
category=category,
page_title="%s %s Category" % ("Edit", category.name),
name=category.name)
@app.route(
'/categories/<int:category_id>/delete',
methods=['GET', 'POST'])
def delete_category_by_id(category_id):
"""
HTML endpoint providing a form to delete a category
"""
if not UserUtils.is_authenticated():
UserUtils.set_preauthentication_url()
flash('sign in to delete categories')
return redirect('/login')
category = session.query(Category).filter_by(id=category_id).one()
if not Permissions.get_user_permissions_for_category(category).delete:
flash('you may delete only empty categories you created')
return redirect(url_for(
'get_categories'))
if request.method == 'POST':
session.delete(category)
session.commit()
flash('category deleted')
return redirect(url_for(
'get_categories'))
else:
return UserUtils.render_user_template(
'category_delete.html',
category=category,
page_title="%s %s Category" % ("Delete", category.name))
# -----------------------------------------------------------------------------
# CATEGORY JSON ENDPOINTS
@app.route(
'/api/categories/')
def api_get_categories():
"""
API endpoint providing a list of all categories
"""
categories = session.query(Category).all()
def serialize(c):
"""
Provides a representation of a category,
suitable for conversion to JSON format
"""
return {
'id': c.id,
'user_id': c.user_id,
'name': c.name,
'items_url': url_for(
'api_get_items_by_category_id', category_id=c.id)
}
return jsonify(
categories=[serialize(category) for category in categories])
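# Illustrative response shape (hypothetical data; items_url comes from the
# api_get_items_by_category_id route, which is defined elsewhere):
#     {"categories": [{"id": 1, "user_id": 1, "name": "Books",
#                      "items_url": "..."}]}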
@app.route(
'/api/categories/<int:category_id>/')
def api_get_category(category_id):
"""
API endpoint providing details for a given category
"""
category = \
session.query(Category).filter_by(id=category_id).one()
items = \
session.query(Item).filter_by(category_id=category_id).all()
def serialize_item(i):
"""
Provides a representation of an item,
suitable for conversion to JSON format
"""
return {
'id': i.id,
'user_id': i.user_id,
'category_id': i.category_id,
'url': url_for(
'api_get_item_by_id',
category_id=i.category_id, item_id=i.id),
'title': i.title,
'description': i.description
}
items = [serialize_item(item) for item in items]
def serialize(c):
"""
Provides a representation of a category,
suitable for conversion to JSON format
"""
return {
'id': c.id,
'user_id': c.user_id,
'name': c.name,
'items': items
}
return jsonify(
category=serialize(category))
# -----------------------------------------------------------------------------
# ITEM HTML ENDPOINTS
def extract_and_validate_item_title(form):
"""
Returns the value and validaton error of the
item title contained in the form
:rtype: (String, String)
"""
title = form.get('title')
title_error = None
if not title:
title_error = 'Title is required'
elif len(title) < 2 or len(title) > 80:
title_error = 'Title must be between 2 and 80 characters'
return title, title_error
def extract_and_validate_item_description(form):
"""
Returns the value and validaton error of the
item description contained in the form
:rtype: (String, String)
"""
description = form.get('description')
description_error = None
if not description:
description_error = 'Description is required'
    elif len(description) < 2 or len(description) > 250:
        description_error = 'Description must be between 2 and 250 characters'
return description, description_error
@app.route(
'/categories/<int:category_id>/items/create',
methods=['GET', 'POST'])
def create_item(category_id):
"""
HTML endpoint providing a form to create a new item within a category
"""
if not UserUtils.is_authenticated():
UserUtils.set_preauthentication_url()
flash('sign in to create an item')
return redirect('/login')
category = \
session.query(Category).filter_by(id=category_id).one()
if request.method == 'POST':
# Extract and validate the form inputs
(title, title_error) = \
extract_and_validate_item_title(request.form)
(description, description_error) = \
extract_and_validate_item_description(request.form)
if title_error or description_error:
return UserUtils.render_user_template(
'item_create.html',
category=category,
category_id=category_id,
title=title,
title_error=title_error,
description=description,
description_error=description_error)
# Create the item in the data store
item = Item(
title=title,
description=description,
category_id=category_id,
user_id=UserUtils.get_authenticated_user_id())
session.add(item)
session.commit()
flash('item created')
return redirect(url_for(
'get_category_by_id',
category_id=category_id))
else:
return UserUtils.render_user_template(
'item_create.html',
category=category,
category_id=category_id)
@app.route(
'/categories/<int:category_id>/items/<int:item_id>/')
def get_item_by_id(category_id, item_id):
"""
HTML endpoint providing details | |
from typing import Union, List
import copy
import math
import numpy as np
"""
Principles:
- geometry objects are defined by the minimum required information
- Points are made of coordinates (floats), everything else is based on Points except for Vectors
"""
class Point:
def __init__(self, x: float, y: float, z: float = 0):
self.x = float(x)
self.y = float(y)
self.z = float(z)
def __str__(self):
return self.pretty_print()
def pretty_print(self, indentation=''):
return "{ind}{x}, {y}, {z} (Point)".format(x=self.x, y=self.y, z=self.z, ind=indentation)
def coordinates(self):
return self.x, self.y, self.z
def __sub__(self, other):
if isinstance(other, Point):
return Vector(x=self.x - other.x,
y=self.y - other.y,
z=self.z - other.z)
elif isinstance(other, Vector):
return Point(x=self.x - other.x,
y=self.y - other.y,
z=self.z - other.z)
def __add__(self, other):
if isinstance(other, Vector):
return Point(x=self.x + other.x,
y=self.y + other.y,
z=self.z + other.z)
    def __eq__(self, other):
        return self.x == other.x and self.y == other.y and self.z == other.z
class Vector:
def __init__(self, x, y, z: float = 0):
self.x = float(x)
self.y = float(y)
self.z = float(z)
def __str__(self):
return self.pretty_print()
def pretty_print(self, indentation=''):
return "{ind}{x}, {y}, {z} (Vector)".format(x=self.x, y=self.y, z=self.z, ind=indentation)
def coordinates(self):
return self.x, self.y, self.z
def length(self) -> float:
return math.sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)
def unitize(self):
return Vector(self.x / self.length(), self.y / self.length(), self.z / self.length())
def cross_product(self, vector2):
product_x = self.y * vector2.z - self.z * vector2.y
product_y = -self.x * vector2.z + self.z * vector2.x
product_z = self.x * vector2.y - self.y * vector2.x
return Vector(product_x, product_y, product_z)
def scalar_product(self, vector2):
product = 0
for xyz in [0, 1, 2]:
product += self.coordinates()[xyz] * vector2.coordinates()[xyz]
return product
def __mul__(self, other):
if isinstance(other, Vector):
# scalar (dot) product
product = 0
for xyz in [0, 1, 2]:
product += self.coordinates()[xyz] * other.coordinates()[xyz]
return product
elif isinstance(other, (float, int)):
return Vector(self.x * other, self.y * other, self.z * other)
def angle(self, vector2):
# angle between the instance vector and the given vector in degrees
# always positive and smaller or equal to 180°
return math.degrees(math.acos(self.scalar_product(vector2) / self.length() / vector2.length()))
def __add__(self, other):
return Vector(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
return Vector(self.x - other.x, self.y - other.y, self.z - other.z)
def __truediv__(self, other: float):
return self * other ** -1
    def __eq__(self, other):
        return self.x == other.x and self.y == other.y and self.z == other.z
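# Quick illustration (added note, not part of the module): basic Point/Vector
# algebra with the classes above.
#     v = Point(1, 2, 3) - Point(0, 0, 0)   # Vector 1.0, 2.0, 3.0
#     v.length()                            # sqrt(14) ~= 3.7417
#     v.cross_product(Vector(1, 0, 0))      # Vector 0.0, 3.0, -2.0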
class Plane:
def __init__(self, normal: Vector, point: Point):
self.normal = normal
self.point = point
def __str__(self):
return self.pretty_print()
def pretty_print(self, indentation=''):
return '{ind}Plane:\n'.format(ind=indentation) +\
'{ind}|--Normal: {s}\n'.format(s=self.normal.pretty_print(), ind=indentation) +\
'{ind}`--Point: {e}\n'.format(e=self.point.pretty_print(), ind=indentation)
def intersect(self, other: Union['Ray', 'Plane']):
if isinstance(other, Ray):
# solve the linear equation system aX = b
plane_eq, plane_ord = self.get_equation(standardize=True)
ray_eq, ray_ord = other.get_equation(standardize=True)
a = np.append(plane_eq, ray_eq, axis=0)
b = np.append(plane_ord, ray_ord, axis=0)
try:
solution = np.linalg.solve(a, b)
except np.linalg.LinAlgError:
# parallel
return None
return Point(
x=solution[0, 0],
y=solution[1, 0],
z=solution[2, 0]
)
if isinstance(other, Plane):
# direction of intersection ray
vector = self.normal.cross_product(other.normal)
if vector == Vector(0, 0, 0):
# parallel
return None
else:
# get largest absolute coordinate value
xyz = [abs(vector.x), abs(vector.y), abs(vector.z)]
set_0_coord = xyz.index(max(xyz))
# set this coordinate to 0 to solve the equation of the two planes
eq1, ord1 = self.get_equation(standardize=True)
eq2, ord2 = other.get_equation(standardize=True)
a = np.append(eq1, eq2, axis=0)
b = np.append(ord1, ord2, axis=0)
# delete the corresponding column from the matrix
i = [True, True, True]
i[set_0_coord] = False
a = a[:, i]
# we should be able to solve this, because parallel case was checked already
solution = np.linalg.solve(a, b)
if set_0_coord == 0:
point = Point(0, solution[0, 0], solution[1, 0])
elif set_0_coord == 1:
point = Point(solution[0, 0], 0, solution[1, 0])
else:
point = Point(solution[0, 0], solution[1, 0], 0)
return Ray(
vector=vector,
point=point
)
def get_equation(self, standardize=False):
# http://tutorial.math.lamar.edu/Classes/CalcIII/EqnsOfPlanes.aspx
a = self.normal.x
b = self.normal.y
c = self.normal.z
d = a * self.point.x + b * self.point.y + c * self.point.z
if standardize:
# return the coefficients of the equation in this form aX + bY + cZ = d
return (
np.array([
[a, b, c]
]),
np.array([
[d]
])
)
return {
'a': a, 'b': b, 'c': c, 'd': d
}
def print_equation(self):
return '{a}x + {b}y + {c}z = {d}'.format(**self.get_equation())
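# Worked example (added for illustration): a plane with normal (1, 2, 3)
# through point (1, 1, 1) has d = 1*1 + 2*1 + 3*1 = 6, so
#     Plane(Vector(1, 2, 3), Point(1, 1, 1)).print_equation()
# yields '1.0x + 2.0y + 3.0z = 6.0'.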
class Ray:
def __init__(self, vector: Vector, point: Point):
self.vector = vector
self.point = point
def get_equation(self, standardize=False):
# http://tutorial.math.lamar.edu/Classes/CalcIII/EqnsOfLines.aspx
x0 = self.point.x
y0 = self.point.y
z0 = self.point.z
a = self.vector.x
b = self.vector.y
c = self.vector.z
if standardize:
            # return the coefficients of the two equations in the form aX + bY + cZ = d
if a == 0:
# 1X + 0Y + 0Z = x0
a1, b1, c1, d1 = 1, 0, 0, x0
if b == 0:
# 0X + 1Y + 0Z = y0
a2, b2, c2, d2 = 0, 1, 0, y0
elif c == 0:
# 0X + 0Y + 1Z = z0
a2, b2, c2, d2 = 0, 0, 1, z0
else:
# 0X + cY - bZ = y0*c - z0*b
a2, b2, c2, d2 = 0, c, -b, y0 * c - z0 * b
elif b == 0:
# 0X + 1Y + 0Z = y0
a1, b1, c1, d1 = 0, 1, 0, y0
if c == 0:
# 0X + 0Y + 1Z = z0
a2, b2, c2, d2 = 0, 0, 1, z0
else:
# cX + 0Y - aZ = x0*c - z0*a
a2, b2, c2, d2 = c, 0, -a, x0 * c - z0 * a
else:
# bX - aY + 0Z = x0*b - y0*a
a1, b1, c1, d1 = b, -a, 0, x0 * b - y0 * a
if c == 0:
# 0X + 0Y + 1Z = z0
a2, b2, c2, d2 = 0, 0, 1, z0
else:
# cX + 0Y - aZ = x0*c - z0*a
a2, b2, c2, d2 = c, 0, -a, x0 * c - z0 * a
return (
np.array([
[a1, b1, c1],
[a2, b2, c2]
]),
np.array([
[d1],
[d2]
])
)
else:
return {
'x0': x0, 'y0': y0, 'z0': z0, 'a': a, 'b': b, 'c': c,
}
def print_equation(self):
coeffs = self.get_equation()
if coeffs['a'] == 0:
eq1 = 'x = {x0}'.format(**coeffs)
if coeffs['b'] == 0:
                eq2 = 'y = {y0}'.format(**coeffs)
elif coeffs['c'] == 0:
                eq2 = 'z = {z0}'.format(**coeffs)
else:
eq2 = '(y - {y0}) / {b} = (z - {z0}) / {c}'.format(**coeffs)
elif coeffs['b'] == 0:
eq1 = 'y = {y0}'.format(**coeffs)
if coeffs['c'] == 0:
                eq2 = 'z = {z0}'.format(**coeffs)
else:
eq2 = '(x - {x0}) / {a} = (z - {z0}) / {c}'.format(**coeffs)
else:
eq1 = '(x - {x0}) / {a} = (y - {y0}) / {b}'.format(**coeffs)
if coeffs['c'] == 0:
                eq2 = 'z = {z0}'.format(**coeffs)
else:
eq2 = '(x - {x0}) / {a} = (z - {z0}) / {c}'.format(**coeffs)
return eq1 + '\n' + eq2
def intersect(self, other: Plane) -> Point:
return other.intersect(self)
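# Illustrative sketch (not in the original module): intersecting a ray with a
# plane via the classes above. The XY plane has normal (0, 0, 1) through the
# origin; a vertical ray through (1, 2, -5) hits it at (1, 2, 0).
#     plane = Plane(normal=Vector(0, 0, 1), point=Point(0, 0, 0))
#     ray = Ray(vector=Vector(0, 0, 1), point=Point(1, 2, -5))
#     ray.intersect(plane)   # -> Point 1.0, 2.0, 0.0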
class Line:
def __init__(self, start: Point, end: Point):
self.start = start
self.end = end
def __str__(self):
return self.pretty_print()
def pretty_print(self, indentation=''):
return '{ind}Line:\n'.format(ind=indentation) +\
'{ind}|--Start: {s}\n'.format(s=self.start.pretty_print(), ind=indentation) +\
'{ind}`--End: {e}\n'.format(e=self.end.pretty_print(), ind=indentation)
def length(self):
return self.to_vector().length()
def to_points(self):
return [self.start, self.end]
def to_vector(self, reverse=False):
if reverse:
return Vector(x=self.start.x - self.end.x,
y=self.start.y - self.end.y,
z=self.start.z - self.end.z)
else:
return Vector(x=self.end.x - self.start.x,
y=self.end.y - self.start.y,
z=self.end.z - self.start.z)
def midpoint(self) -> Point:
return Point(
x=(self.start.x + self.end.x) / 2,
y=(self.start.y + self.end.y) / 2,
z=(self.start.z + self.end.z) / 2,
)
    def __eq__(self, other):
        return (self.start == other.start and self.end == other.end) or \
               (self.start == other.end and self.end == other.start)
def to_ray(self) -> Ray:
return Ray(
vector=self.to_vector(),
point=self.start
)
def flip(self) -> 'Line':
return Line(start=self.end, end=self.start)
class Rectangle:
def __init__(self, side: Line, external_point: Point):
self.side = side
        self.external_point = external_point
"""
Routines for the analysis of proton radiographs. These routines can be broadly
classified as either creating synthetic radiographs from prescribed fields or
methods of 'inverting' experimentally created radiographs to reconstruct the
original fields (under some set of assumptions).
"""
__all__ = [
"SyntheticProtonRadiograph",
]
import astropy.constants as const
import astropy.units as u
import numpy as np
import sys
import warnings
from tqdm import tqdm
from plasmapy import particles
from plasmapy.formulary.mathematics import rot_a_to_b
from plasmapy.particles import Particle
from plasmapy.plasma.grids import AbstractGrid
from plasmapy.simulation.particle_integrators import boris_push
def _coerce_to_cartesian_si(pos):
"""
    Takes a tuple of `astropy.units.Quantity` values representing a position
in space in either Cartesian, cylindrical, or spherical coordinates, and
returns a numpy array representing the same point in Cartesian
coordinates and units of meters.
"""
# Auto-detect geometry based on units
geo_units = [x.unit for x in pos]
if geo_units[2].is_equivalent(u.rad):
geometry = "spherical"
elif geo_units[1].is_equivalent(u.rad):
geometry = "cylindrical"
else:
geometry = "cartesian"
# Convert geometrical inputs between coordinates systems
pos_out = np.zeros(3)
if geometry == "cartesian":
x, y, z = pos
pos_out[0] = x.to(u.m).value
pos_out[1] = y.to(u.m).value
pos_out[2] = z.to(u.m).value
elif geometry == "cylindrical":
r, t, z = pos
r = r.to(u.m)
t = t.to(u.rad).value
z = z.to(u.m)
pos_out[0] = (r * np.cos(t)).to(u.m).value
pos_out[1] = (r * np.sin(t)).to(u.m).value
pos_out[2] = z.to(u.m).value
elif geometry == "spherical":
r, t, p = pos
r = r.to(u.m)
t = t.to(u.rad).value
p = p.to(u.rad).value
pos_out[0] = (r * np.sin(t) * np.cos(p)).to(u.m).value
pos_out[1] = (r * np.sin(t) * np.sin(p)).to(u.m).value
pos_out[2] = (r * np.cos(t)).to(u.m).value
return pos_out
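# Illustrative call (added note): a spherical position (r = 1 m,
# theta = pi/2 rad, phi = 0 rad) is auto-detected from its units and
# converted to Cartesian metres:
#     _coerce_to_cartesian_si((1 * u.m, np.pi / 2 * u.rad, 0 * u.rad))
#     # -> array([1., 0., ~0.])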
class SyntheticProtonRadiograph:
r"""
Represents a charged particle radiography experiment with simulated or
calculated E and B fields given at positions defined by a grid of spatial
coordinates. The particle source and detector plane are defined by vectors
from the origin of the grid.
Parameters
----------
grid : `~plasmapy.plasma.grids.AbstractGrid` or subclass thereof
A Grid object containing the required quantities [E_x, E_y, E_z, B_x, B_y, B_z].
If any of these quantities are missing, a warning will be given and that
quantity will be assumed to be zero everywhere.
source : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the location
of the particle source. This vector will be interpreted as
being in either cartesian, cylindrical, or spherical coordinates
based on its units. Valid geometries are:
* Cartesian (x,y,z) : (meters, meters, meters)
* cylindrical (r, theta, z) : (meters, radians, meters)
* spherical (r, theta, phi) : (meters, radians, radians)
In spherical coordinates theta is the polar angle.
detector : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the center
of the detector plane. The vector from the source point to this
point defines the normal vector of the detector plane. This vector
can also be specified in cartesian, cylindrical, or spherical
coordinates (see the `source` keyword).
detector_hdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the horizontal
direction on the detector plane. By default, the horizontal axis in the
detector plane is defined to be perpendicular to both the
source-to-detector vector and the z-axis (unless the source-to-detector axis
is parallel to the z axis, in which case the horizontal axis is the x-axis).
The detector vertical axis is then defined
to be orthogonal to both the source-to-detector vector and the
detector horizontal axis.
verbose : bool, optional
If true, updates on the status of the program will be printed to
standard output while running.
"""
def __init__(
self,
grid: AbstractGrid,
source: u.m,
detector: u.m,
detector_hdir=None,
verbose=True,
):
# self.grid is the grid object
self.grid = grid
# self.grid_arr is the grid positions in SI units. This is created here
# so that it isn't continuously recomputed later
self.grid_arr = grid.grid.to(u.m).value
self.verbose = verbose
# A list of wire meshes added to the grid with add_wire_mesh
# Particles that would hit these meshes will be removed at runtime
# by _apply_wire_mesh
self.mesh_list = []
# ************************************************************************
# Setup the source and detector geometries
# ************************************************************************
self.source = _coerce_to_cartesian_si(source)
self.detector = _coerce_to_cartesian_si(detector)
self._log(f"Source: {self.source} m")
self._log(f"Detector: {self.detector} m")
# Calculate normal vectors (facing towards the grid origin) for both
# the source and detector planes
self.src_n = -self.source / np.linalg.norm(self.source)
self.det_n = -self.detector / np.linalg.norm(self.detector)
# Vector directly from source to detector
self.src_det = self.detector - self.source
# Magnification
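# (for a point projection with the object at the grid origin and the source
# and detector on opposite sides of it, this is the usual radiography
# magnification (|source| + |detector|) / |source|)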
self.mag = 1 + np.linalg.norm(self.detector) / np.linalg.norm(self.source)
self._log(f"Magnification: {self.mag}")
# Check that source-detector vector actually passes through the grid
if not self.grid.vector_intersects(self.source * u.m, self.detector * u.m):
raise ValueError(
"The vector between the source and the detector "
"does not intersect the grid provided!"
)
# Determine the angle above which particles will not hit the grid
# these particles can be ignored until the end of the simulation,
# then immediately advanced to the detector grid with their original
# velocities
self.max_theta_hit_grid = self._max_theta_hit_grid()
# ************************************************************************
# Define the detector plane
# ************************************************************************
# Load or calculate the detector hdir
if detector_hdir is not None:
self.det_hdir = detector_hdir / np.linalg.norm(detector_hdir)
else:
self.det_hdir = self._default_detector_hdir()
# Calculate the detector vdir
ny = np.cross(self.det_hdir, self.det_n)
self.det_vdir = -ny / np.linalg.norm(ny)
# ************************************************************************
# Validate the E and B fields
# ************************************************************************
req_quantities = ["E_x", "E_y", "E_z", "B_x", "B_y", "B_z"]
self.grid.require_quantities(req_quantities, replace_with_zeros=True)
for rq in req_quantities:
# Check that there are no infinite values
if not np.isfinite(self.grid[rq].value).all():
raise ValueError(
f"Input arrays must be finite: {rq} contains "
"either NaN or infinite values."
)
# Check that the max values on the edges of the arrays are
# small relative to the maximum values on that grid
#
# Array must be dimensionless to re-assemble it into an array
# of max values like this
arr = np.abs(self.grid[rq]).value
edge_max = np.max(
np.array(
[
np.max(arr[0, :, :]),
np.max(arr[-1, :, :]),
np.max(arr[:, 0, :]),
np.max(arr[:, -1, :]),
np.max(arr[:, :, 0]),
np.max(arr[:, :, -1]),
]
)
)
if edge_max > 1e-3 * np.max(arr):
unit = grid.recognized_quantities[rq].unit
warnings.warn(
"Fields should go to zero at edges of grid to avoid "
f"non-physical effects, but a value of {edge_max:.2E} {unit} was "
f"found on the edge of the {rq} array. Consider applying an "
"envelope function to force the fields at the edge to go to "
"zero.",
RuntimeWarning,
)
def _default_detector_hdir(self):
"""
Calculates the default horizontal unit vector for the detector plane
(see __init__ description for details)
"""
# Create unit vectors that define the detector plane
# Define plane horizontal axis
if np.allclose(np.abs(self.det_n), np.array([0, 0, 1])):
nx = np.array([1, 0, 0])
else:
nx = np.cross(np.array([0, 0, 1]), self.det_n)
nx = nx / np.linalg.norm(nx)
return nx
def _max_theta_hit_grid(self):
r"""
Using the grid and the source position, compute the maximum particle
theta that will impact the grid. This value can be used to determine
which particles are worth tracking.
"""
ind = 0
theta = np.zeros([8])
for x in [0, -1]:
for y in [0, -1]:
for z in [0, -1]:
# Source to grid corner vector
vec = self.grid_arr[x, y, z, :] - self.source
# Calculate angle between vec and the source-to-detector
# axis, which is the central axis of the particle beam
theta[ind] = np.arccos(
np.dot(vec, self.src_det)
/ np.linalg.norm(vec)
/ np.linalg.norm(self.src_det)
)
ind += 1
return np.max(theta)
def _log(self, msg):
if self.verbose:
print(msg)
# Define some constants so they don't get constantly re-evaluated
_c = const.c.si.value
# *************************************************************************
# Create mesh
# *************************************************************************
def add_wire_mesh(
self, location, extent, nwires, wire_diameter, mesh_hdir=None, mesh_vdir=None
):
"""
Add a wire mesh grid between the particle source and the object grid
that blocks particles whose paths intersect the wires.
Parameters
----------
location : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the center of the
mesh grid. This location must be between the source and the
object grid.
This vector will be interpreted as
being in either cartesian, cylindrical, or spherical coordinates
based on its units. Valid geometries are:
* Cartesian (x,y,z) : (meters, meters, meters)
* | |
"""
Common tests shared by test_str, test_unicode, test_userstring and test_string.
"""
import unittest, string, sys, struct
from test import test_support
from UserList import UserList
class Sequence:
def __init__(self, seq='wxyz'): self.seq = seq
def __len__(self): return len(self.seq)
def __getitem__(self, i): return self.seq[i]
class BadSeq1(Sequence):
def __init__(self): self.seq = [7, 'hello', 123L]
class BadSeq2(Sequence):
def __init__(self): self.seq = ['a', 'b', 'c']
def __len__(self): return 8
class CommonTest(unittest.TestCase):
# This testcase contains tests that can be used in all
# string-like classes. Currently these are str, unicode,
# UserString and the string module.
# The type to be tested
# Change in subclasses to change the behaviour of fixtype()
type2test = None
# All tests pass their arguments to the testing methods
# as str objects. fixtype() can be used to propagate
# these arguments to the appropriate type
def fixtype(self, obj):
if isinstance(obj, str):
return self.__class__.type2test(obj)
elif isinstance(obj, list):
return [self.fixtype(x) for x in obj]
elif isinstance(obj, tuple):
return tuple([self.fixtype(x) for x in obj])
elif isinstance(obj, dict):
return dict([
(self.fixtype(key), self.fixtype(value))
for (key, value) in obj.iteritems()
])
else:
return obj
# check that object.method(*args) returns result
def checkequal(self, result, object, methodname, *args):
result = self.fixtype(result)
object = self.fixtype(object)
args = self.fixtype(args)
realresult = getattr(object, methodname)(*args)
self.assertEqual(
result,
realresult
)
# if the original is returned make sure that
# this doesn't happen with subclasses
if object == realresult:
class subtype(self.__class__.type2test):
pass
object = subtype(object)
realresult = getattr(object, methodname)(*args)
self.assert_(object is not realresult)
# check that object.method(*args) raises exc
def checkraises(self, exc, object, methodname, *args):
object = self.fixtype(object)
args = self.fixtype(args)
self.assertRaises(
exc,
getattr(object, methodname),
*args
)
# call object.method(*args) without any checks
def checkcall(self, object, methodname, *args):
object = self.fixtype(object)
args = self.fixtype(args)
getattr(object, methodname)(*args)
def test_hash(self):
# SF bug 1054139: += optimization was not invalidating cached hash value
a = self.type2test('DNSSEC')
b = self.type2test('')
for c in a:
b += c
hash(b)
self.assertEqual(hash(a), hash(b))
def test_capitalize(self):
self.checkequal(' hello ', ' hello ', 'capitalize')
self.checkequal('Hello ', 'Hello ','capitalize')
self.checkequal('Hello ', 'hello ','capitalize')
self.checkequal('Aaaa', 'aaaa', 'capitalize')
self.checkequal('Aaaa', 'AaAa', 'capitalize')
self.checkraises(TypeError, 'hello', 'capitalize', 42)
def test_count(self):
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(2, 'aaa', 'count', 'a', 1)
self.checkequal(0, 'aaa', 'count', 'a', 10)
self.checkequal(1, 'aaa', 'count', 'a', -1)
self.checkequal(3, 'aaa', 'count', 'a', -10)
self.checkequal(1, 'aaa', 'count', 'a', 0, 1)
self.checkequal(3, 'aaa', 'count', 'a', 0, 10)
self.checkequal(2, 'aaa', 'count', 'a', 0, -1)
self.checkequal(0, 'aaa', 'count', 'a', 0, -10)
self.checkequal(3, 'aaa', 'count', '', 1)
self.checkequal(1, 'aaa', 'count', '', 3)
self.checkequal(0, 'aaa', 'count', '', 10)
self.checkequal(2, 'aaa', 'count', '', -1)
self.checkequal(4, 'aaa', 'count', '', -10)
self.checkequal(1, '', 'count', '')
self.checkequal(0, '', 'count', '', 1, 1)
self.checkequal(0, '', 'count', '', sys.maxint, 0)
self.checkequal(0, '', 'count', 'xx')
self.checkequal(0, '', 'count', 'xx', 1, 1)
self.checkequal(0, '', 'count', 'xx', sys.maxint, 0)
self.checkraises(TypeError, 'hello', 'count')
self.checkraises(TypeError, 'hello', 'count', 42)
# For a variety of combinations,
# verify that str.count() matches an equivalent function
# replacing all occurrences and then differencing the string lengths
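# A concrete instance of the identity being checked (hypothetical values):
#   'aaa'.count('a') == (len('aaa') - len('aaa'.replace('a', ''))) // len('a') == 3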
charset = ['', 'a', 'b']
digits = 7
base = len(charset)
teststrings = set()
for i in xrange(base ** digits):
entry = []
for j in xrange(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = list(teststrings)
for i in teststrings:
i = self.fixtype(i)
n = len(i)
for j in teststrings:
r1 = i.count(j)
if j:
r2, rem = divmod(n - len(i.replace(j, '')), len(j))
else:
r2, rem = len(i)+1, 0
if rem or r1 != r2:
self.assertEqual(rem, 0, '%s != 0 for %s' % (rem, i))
self.assertEqual(r1, r2, '%s != %s for %s' % (r1, r2, i))
def test_find(self):
self.checkequal(0, 'abcdefghiabc', 'find', 'abc')
self.checkequal(9, 'abcdefghiabc', 'find', 'abc', 1)
self.checkequal(-1, 'abcdefghiabc', 'find', 'def', 4)
self.checkequal(0, 'abc', 'find', '', 0)
self.checkequal(3, 'abc', 'find', '', 3)
self.checkequal(-1, 'abc', 'find', '', 4)
# to check the ability to pass None as defaults
self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4)
self.checkequal(-1, 'rrarrrrrrrrra', 'find', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'find')
self.checkraises(TypeError, 'hello', 'find', 42)
self.checkequal(0, '', 'find', '')
self.checkequal(-1, '', 'find', '', 1, 1)
self.checkequal(-1, '', 'find', '', sys.maxint, 0)
self.checkequal(-1, '', 'find', 'xx')
self.checkequal(-1, '', 'find', 'xx', 1, 1)
self.checkequal(-1, '', 'find', 'xx', sys.maxint, 0)
# issue 7458
self.checkequal(-1, 'ab', 'find', 'xxx', sys.maxsize + 1, 0)
# For a variety of combinations,
# verify that str.find() matches __contains__
# and that the found substring is really at that location
charset = ['', 'a', 'b', 'c']
digits = 5
base = len(charset)
teststrings = set()
for i in xrange(base ** digits):
entry = []
for j in xrange(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = list(teststrings)
for i in teststrings:
i = self.fixtype(i)
for j in teststrings:
loc = i.find(j)
r1 = (loc != -1)
r2 = j in i
self.assertEqual(r1, r2)
if loc != -1:
self.assertEqual(i[loc:loc+len(j)], j)
def test_rfind(self):
self.checkequal(9, 'abcdefghiabc', 'rfind', 'abc')
self.checkequal(12, 'abcdefghiabc', 'rfind', '')
self.checkequal(0, 'abcdefghiabc', 'rfind', 'abcd')
self.checkequal(-1, 'abcdefghiabc', 'rfind', 'abcz')
self.checkequal(3, 'abc', 'rfind', '', 0)
self.checkequal(3, 'abc', 'rfind', '', 3)
self.checkequal(-1, 'abc', 'rfind', '', 4)
# to check the ability to pass None as defaults
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4)
self.checkequal(-1, 'rrarrrrrrrrra', 'rfind', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'rfind', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'rfind')
self.checkraises(TypeError, 'hello', 'rfind', 42)
# For a variety of combinations,
# verify that str.rfind() matches __contains__
# and that the found substring is really at that location
charset = ['', 'a', 'b', 'c']
digits = 5
base = len(charset)
teststrings = set()
for i in xrange(base ** digits):
entry = []
for j in xrange(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = list(teststrings)
for i in teststrings:
i = self.fixtype(i)
for j in teststrings:
loc = i.rfind(j)
r1 = (loc != -1)
r2 = j in i
self.assertEqual(r1, r2)
if loc != -1:
self.assertEqual(i[loc:loc+len(j)], j)
# issue 7458
self.checkequal(-1, 'ab', 'rfind', 'xxx', sys.maxsize + 1, 0)
def test_index(self):
self.checkequal(0, 'abcdefghiabc', 'index', '')
self.checkequal(3, 'abcdefghiabc', 'index', 'def')
self.checkequal(0, 'abcdefghiabc', 'index', 'abc')
self.checkequal(9, 'abcdefghiabc', 'index', 'abc', 1)
self.checkraises(ValueError, 'abcdefghiabc', 'index', 'hib')
self.checkraises(ValueError, 'abcdefghiab', 'index', 'abc', 1)
self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', 8)
self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', -1)
# to check the ability to pass None as defaults
self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4)
self.checkraises(ValueError, 'rrarrrrrrrrra', 'index', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'index')
self.checkraises(TypeError, 'hello', 'index', 42)
def test_rindex(self):
self.checkequal(12, 'abcdefghiabc', 'rindex', '')
self.checkequal(3, 'abcdefghiabc', 'rindex', 'def')
self.checkequal(9, 'abcdefghiabc', 'rindex', 'abc')
self.checkequal(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)
self.checkraises(ValueError, 'abcdefghiabc', 'rindex', 'hib')
self.checkraises(ValueError, 'defghiabc', 'rindex', 'def', 1)
self.checkraises(ValueError, 'defghiabc', 'rindex', 'abc', 0, -1)
self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, 8)
self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, -1)
# to check the ability to pass None as defaults
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4)
self.checkraises(ValueError, 'rrarrrrrrrrra', 'rindex', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'rindex', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'rindex')
self.checkraises(TypeError, 'hello', 'rindex', 42)
def test_lower(self):
self.checkequal('hello', 'HeLLo', 'lower')
self.checkequal('hello', 'hello', 'lower')
self.checkraises(TypeError, 'hello', 'lower', 42)
def test_upper(self):
self.checkequal('HELLO', 'HeLLo', 'upper')
self.checkequal('HELLO', 'HELLO', 'upper')
self.checkraises(TypeError, 'hello', 'upper', 42)
def test_expandtabs(self):
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 4)
self.checkequal('abc\r\nab def\ng hi', 'abc\r\nab\tdef\ng\thi', 'expandtabs', 4)
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
self.checkequal('abc\r\nab\r\ndef\ng\r\nhi', 'abc\r\nab\r\ndef\ng\r\nhi', 'expandtabs', 4)
self.checkequal(' a\n b', ' \ta\n\tb', 'expandtabs', 1)
self.checkraises(TypeError, 'hello', 'expandtabs', 42, 42)
# This test is only valid when sizeof(int) == sizeof(void*) == 4.
if sys.maxint < (1 << 32) and struct.calcsize('P') == 4:
self.checkraises(OverflowError,
'\ta\n\tb', 'expandtabs', sys.maxint)
def test_split(self):
self.checkequal(['this', 'is', 'the', 'split', 'function'],
'this is the split function', 'split')
# by whitespace
self.checkequal(['a', | |
" + str(desc_count) + " where hm_uuid = '" + parent_id + "'")
dbConn.commit()
return r_val
#an organ
elif entity_type == 'SAMPLE' and anc_entity_type == 'DONOR':
if isBlank(organ_code):
return Response("An id can't be created for a SAMPLE because the immediate ancestor is a DONOR and the SAMPLE was not supplied with an associated organ code (SAMPLE must be an organ to have a DONOR as a direct ancestor)", 400)
organ_code = organ_code.strip().upper()
curs.execute("select organ_count from hm_organs where donor_uuid = '" + parent_id + "' and organ_code = '" + organ_code + "'")
org_res = curs.fetchone()
if org_res is None:
curs.execute("insert into hm_organs (DONOR_UUID, ORGAN_CODE, ORGAN_COUNT) VALUE ('" + parent_id + "', '" + organ_code + "', 0)")
dbConn.commit()
org_count = 0
else:
org_count = org_res[0]
if organ_code not in MULTIPLE_ALLOWED_ORGANS:
if org_count >= 1:
return Response("Cannot add another organ of type " + organ_code + " to DONOR " + parent_id + "; one already exists.", 400)
if num_to_gen > 1:
return Response("Cannot create multiple submission ids for organ of type " + organ_code + ". " + str(num_to_gen) + " requested.", 400)
org_count = 1
r_val = [anc_submission_id + "-" + organ_code]
else:
r_val = []
for _ in range(0, num_to_gen):
org_count = org_count + 1
r_val.append(anc_submission_id + "-" + organ_code + padLeadingZeros(org_count, 2))
curs.execute("update hm_organs set organ_count = " + str(org_count) + " where donor_uuid = '" + parent_id + "' and organ_code = '" + organ_code + "'")
dbConn.commit()
return r_val
#error if remaining non-organ samples are not the descendants of a SAMPLE
elif entity_type == 'SAMPLE' and not anc_entity_type == 'SAMPLE':
return Response("Cannot create a submission id for a SAMPLE with a direct ancestor of " + anc_entity_type, 400)
elif entity_type == 'SAMPLE':
r_val = []
for _ in range(0, num_to_gen):
desc_count = desc_count + 1
r_val.append(anc_submission_id + "-" + str(desc_count))
curs.execute("update hm_uuids set descendant_count = " + str(desc_count) + " where hm_uuid = '" + parent_id + "'")
dbConn.commit()
return r_val
else:
return Response("Cannot create a submission id for an entity of type " + entity_type, 400)
#generate multiple ids, one for each display id in the displayIds array
def newUUIDs(self, parentIDs, entityType, userId, userEmail, nIds, organ_code = None, lab_code = None, file_info_array = None, base_dir_type = None):
#if entityType == 'DONOR':
gen_base_ids = entityType in HUBMAP_ID_ENTITY_TYPES
returnIds = []
now = time.strftime('%Y-%m-%d %H:%M:%S')
store_file_info = False
if entityType == 'FILE':
store_file_info = True
with self.lock:
#generate in batches
previousUUIDs = set()
previous_hubmap_ids = set()
gen_submission_ids = False
if entityType in SUBMISSION_ID_ENTITY_TYPES:
gen_submission_ids = True
for i in range(0, nIds, MAX_GEN_IDS):
insertVals = []
file_info_insert_vals = []
insertParents = []
numToGen = min(MAX_GEN_IDS, nIds - i)
#generate uuids
uuids = self.__nUniqueIds(numToGen, self.uuidGen, "HM_UUID", previousGeneratedIds=previousUUIDs)
if gen_base_ids:
hubmap_base_ids = self.__nUniqueIds(numToGen, self.hmidGen, "HUBMAP_BASE_ID", previousGeneratedIds=previous_hubmap_ids)
else:
hubmap_base_ids = [None] * numToGen
count_increase_q = None
submission_ids = None
if gen_submission_ids:
submission_ids = self.__create_submission_ids(numToGen, parentIDs[0], entityType, organ_code = organ_code, lab_code = lab_code)
if isinstance(submission_ids, Response):
return submission_ids
for n in range(0, numToGen):
insUuid = uuids[n]
previousUUIDs.add(insUuid)
thisId = {"uuid":insUuid}
if gen_base_ids:
ins_hubmap_base_id = hubmap_base_ids[n]
previous_hubmap_ids.add(ins_hubmap_base_id)
ins_display_hubmap_id = self.__display_hm_id(ins_hubmap_base_id)
thisId["hubmap_base_id"] = ins_hubmap_base_id
thisId["hubmap_id"] = ins_display_hubmap_id
else:
ins_hubmap_base_id = None
if gen_submission_ids:
thisId["submission_id"] = submission_ids[n]
insRow = (insUuid, ins_hubmap_base_id, entityType, now, userId, userEmail, submission_ids[n])
else:
insRow = (insUuid, ins_hubmap_base_id, entityType, now, userId, userEmail)
if store_file_info:
info_idx = i + n
file_path = file_info_array[info_idx]['path']
#replace any <uuid> tags in the file path with the generated uuid
file_path = file_path.replace('<uuid>', insUuid)
file_checksum = None
file_size = None
if 'checksum' in file_info_array[info_idx]:
file_checksum = file_info_array[info_idx]['checksum']
if 'size' in file_info_array[info_idx]:
file_size = file_info_array[info_idx]['size']
file_info_ins_row = (insUuid, file_path, file_checksum, file_size, base_dir_type)
#file_info_ins_row = (insUuid, file_path, file_checksum)
file_info_insert_vals.append(file_info_ins_row)
thisId['file_path'] = file_path
returnIds.append(thisId)
insertVals.append(insRow)
for parentId in parentIDs:
parRow = (insUuid, parentId)
insertParents.append(parRow)
with closing(self.hmdb.getDBConnection()) as dbConn:
with closing(dbConn.cursor()) as curs:
if gen_submission_ids:
curs.executemany(INSERT_SQL_WITH_SUBMISSION_ID, insertVals)
# count_increase_q is never populated in this code path (the submission-id
# counters are already updated inside __create_submission_ids), so only run
# it if it was actually set
if count_increase_q is not None:
curs.execute(count_increase_q)
else:
curs.executemany(INSERT_SQL, insertVals)
if store_file_info:
curs.executemany(INSERT_FILE_INFO_SQL, file_info_insert_vals)
curs.executemany(INSERT_ANCESTOR_SQL, insertParents)
dbConn.commit()
return json.dumps(returnIds)
def nUniqueIds(self, nIds, idGenMethod, dbColumn):
return self.__nUniqueIds(nIds, idGenMethod, dbColumn)
#generate unique ids
#generates ids with provided id generation method and checks them against existing ids in the DB
#this method MUST BE CALLED FROM WITHIN a self.lock block
def __nUniqueIds(self, nIds, idGenMethod, dbColumn, previousGeneratedIds=set(), iteration=1):
ids = set()
lclPreviousIds = copy.deepcopy(previousGeneratedIds)
for _ in range(nIds):
newId = idGenMethod()
count = 1
while (newId in ids or newId in lclPreviousIds) and count < 100:
newId = idGenMethod()
count = count + 1
if count == 100:
raise Exception("Unable to generate an initial unique id for " + dbColumn + " after 100 attempts.")
ids.add(newId)
lclPreviousIds.add(newId)
dupes = self.__findDupsInDB(dbColumn, ids)
if dupes is not None and len(dupes) > 0:
n_iter = iteration + 1
if n_iter > 100:
raise Exception("Unable to generate unique id(s) for " + dbColumn + " after 100 attempts.")
replacements = self.__nUniqueIds(len(dupes), idGenMethod, dbColumn, previousGeneratedIds=lclPreviousIds, iteration=n_iter)
for val in dupes:
ids.remove(val[0])
for val in replacements:
ids.add(val)
return list(ids)
def __findDupsInDB(self, dbColumn, idSet):
sql = "select " + dbColumn + " from hm_uuids where " + dbColumn + " IN(" + listToCommaSeparated(idSet, "'", True) + ")"
with closing(self.hmdb.getDBConnection()) as dbConn:
with closing(dbConn.cursor()) as curs:
curs.execute(sql)
dupes = curs.fetchall()
return dupes
#which items in idSet are not in the database
def __findExclusionsInDB(self, dbColumn, idSet):
sql = "select " + dbColumn + " from ( "
first = True
for ex_id in idSet:
if first: first = False
else: sql = sql + " UNION ALL "
sql = sql + "(select '" + ex_id + "' as " + dbColumn + ")"
sql = sql + ") as list left join hm_uuids using (" + dbColumn + ") where hm_uuids." + dbColumn + " is null"
with closing(self.hmdb.getDBConnection()) as dbConn:
with closing(dbConn.cursor()) as curs:
curs.execute(sql)
excluded = curs.fetchall()
return excluded
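# For illustration, with a hypothetical idSet of {'A', 'B'} and dbColumn 'hm_uuid',
# the generated query looks like:
#   select hm_uuid from ( (select 'A' as hm_uuid) UNION ALL (select 'B' as hm_uuid))
#   as list left join hm_uuids using (hm_uuid) where hm_uuids.hm_uuid is null
# i.e. it returns the candidate ids that do not already exist in the hm_uuids table.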
def __display_hm_id(self, hm_base_id):
hubmap_id = 'HBM' + hm_base_id[0:3] + '.' + hm_base_id[3:7] + '.' + hm_base_id[7:]
return hubmap_id
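# Sketch of the expected formatting, using a made-up 10-character base id:
#   __display_hm_id('123ABCD456') -> 'HBM123.ABCD.456'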
def hmidGen(self):
nums1 = ''
nums2 = ''
alphs = ''
for _ in range(3):
nums1 = nums1 + secrets.choice(HMID_NUM_CHARS) #[random.randint(0,len(DOI_NUM_CHARS)-1)]
for _ in range(3):
nums2 = nums2 + secrets.choice(HMID_NUM_CHARS) #[random.randint(0,len(DOI_NUM_CHARS)-1)]
for _ in range(4):
alphs = alphs + secrets.choice(HMID_ALPHA_CHARS) #[random.randint(0,len(DOI_ALPHA_CHARS)-1)]
val = nums1 + alphs + nums2
return(val)
def getIdExists(self, hmid):
if not isValidHMId(hmid):
return Response("Invalid HuBMAP Id", 400)
tid = stripHMid(hmid)
if startsWithComponentPrefix(hmid):
return self.submission_id_exists(hmid.strip())
elif len(tid) == 10:
return self.base_id_exists(tid.upper())
elif len(tid) == 32:
return self.uuid_exists(tid.lower())
else:
return Response("Invalid HuBMAP Id (or empty or bad length)", 400)
#convert csv list of ancestor ids to a list
#convert hubmap base id to a hubmap id (display version)
def _convert_result_id_array(self, results, hmid):
if isinstance(results, list):
asize = len(results)
if asize == 0:
record = None
elif asize == 1:
record = results[0]
if 'ancestor_ids' not in record:
return record
ancestor_ids = record['ancestor_ids']
if ancestor_ids is None or ancestor_ids.strip() == '':
record.pop('ancestor_ids', '')
elif isinstance(ancestor_ids, str):
record['ancestor_ids'] = ancestor_ids.split(',')
if len(record['ancestor_ids']) == 1:
record['ancestor_id'] = record['ancestor_ids'][0]
else:
raise Exception("Unknown ancestor type for id:" + hmid)
if 'hubmap_base_id' in record:
if not record['hubmap_base_id'].strip() == '':
record['hubmap_id'] = self.__display_hm_id(record['hubmap_base_id'])
record.pop('hubmap_base_id', '')
else:
raise Exception("Multiple results exist for id:" + hmid)
return record
def getIdInfo(self, hmid):
if not isValidHMId(hmid):
return Response(hmid + " is not a valid id format", 400)
tidl = hmid.strip().lower()
tid = stripHMid(hmid)
if startsWithComponentPrefix(hmid):
sql = "select " + UUID_SELECTS + " from hm_uuids inner join hm_ancestors on hm_ancestors.descendant_uuid = hm_uuids.hm_uuid where lower(submission_id) ='" + tidl + "'"
elif len(tid) == 10:
sql = "select " + UUID_SELECTS + " from hm_uuids inner join hm_ancestors on hm_ancestors.descendant_uuid = hm_uuids.hm_uuid where hubmap_base_id ='" + tid + "'"
elif len(tid) == 32:
| |
import sys
import os
cwd=os.getcwd()
work_dir = os.path.join(cwd,'benchmarks')
# os.chdir(work_dir)
sys.path.append(work_dir)
import tensorflow as tf
import benchmark_cnn
from config import Options
from utils import *
from model_builder import Model_Builder
import numpy as np
import random
import math
import copy
def ckpt_to_pb(input_checkpoint, output_graph):
saver = tf.train.import_meta_graph(input_checkpoint+'.meta',clear_devices=True)
#for n in tf.get_default_graph().as_graph_def().node:
# print(n.name)
#exit(0)
output_node_names = 'tower_0/v0/cg/affine2/xw_plus_b'
with tf.Session() as sess:
input_graph_def = sess.graph_def
saver.restore(sess,input_checkpoint)
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=input_graph_def,
output_node_names=output_node_names.split(','))
with tf.gfile.GFile(output_graph+'.pb', 'wb') as f:
f.write(output_graph_def.SerializeToString())
for n in input_graph_def.node:
print(n.name)
def gen_feed_data(sess, input_list, buf, options, cur_iters):
selet = options.selected_training_labels
if len(input_list) == 3:
im_op, lb_op, or_op = input_list
if buf is None:
buf = [[],[],[]]
while len(buf[0]) < options.batch_size:
cur_iters += 1
images, labels, ori_labels = sess.run([im_op, lb_op, or_op])
for i, l, o in zip(images, labels, ori_labels):
if selet is None or o in selet:
buf[0].append(i)
buf[1].append(l)
buf[2].append(o)
im = np.asarray(buf[0][0:options.batch_size])
lb = np.asarray(buf[1][0:options.batch_size])
ol = np.asarray(buf[2][0:options.batch_size])
buf[0] = buf[0][options.batch_size:]
buf[1] = buf[1][options.batch_size:]
buf[2] = buf[2][options.batch_size:]
if len(lb.shape) < 2:
lb = np.expand_dims(lb,axis=1)
if len(ol.shape) < 2:
ol = np.expand_dims(ol,axis=1)
return (im, lb, ol), buf, cur_iters
elif len(input_list) == 2:
im_op, lb_op = input_list
if buf is None:
buf = [[],[]]
while len(buf[0]) < options.batch_size:
cur_iters += 1
images, labels = sess.run([im_op, lb_op])
for i, l in zip(images, labels):
if selet is None or l in selet:
buf[0].append(i)
buf[1].append(l)
im = np.asarray(buf[0][0:options.batch_size])
lb = np.asarray(buf[1][0:options.batch_size])
buf[0] = buf[0][options.batch_size:]
buf[1] = buf[1][options.batch_size:]
if len(lb.shape) < 2:
lb = np.expand_dims(lb,axis=1)
return (im, lb), buf, cur_iters
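# Note (summary of the helper above): gen_feed_data keeps pulling batches from the
# dataset ops, keeps only samples whose (original) label is in
# options.selected_training_labels (or all samples when that is None), and returns
# exactly one batch of batch_size samples plus the leftover buffer and the updated
# iteration counter, so the caller can keep feeding placeholders.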
def feed_input_by_dict(options, model_name):
if model_name == 'resnet50' and options.selected_training_labels is not None:
return True
return False
def get_run_script(model_name):
if model_name == 'gtsrb':
return 'python3 benchmarks/train_gtsrb.py'
if model_name == 'resnet50':
return 'python3 benchmarks/train_imagenet.py'
if 'resnet101' in model_name:
return 'python3 benchmarks/train_megaface.py'
if 'cifar' in model_name:
return 'python3 benchmarks/train_cifar10.py'
def justify_options_for_model(options, model_name):
if model_name == 'gtsrb':
options.batch_size = 128
options.crop_size = 32
if options.data_subset == 'validation':
options.data_dir = options.home_dir+'data/GTSRB/test/Images/'
else:
options.data_dir = options.home_dir+'data/GTSRB/train/Images/'
elif 'resnet101' in model_name:
options.batch_size = 32
options.crop_size = 128
if options.data_subset == 'validation':
options.data_dir = options.home_dir+'data/MF/test/FaceScrub_aligned/'
else:
options.data_dir = options.home_dir+'data/MF/train/tightly_cropped/'
elif model_name == 'resnet50':
options.batch_size = 32
options.crop_size = 224
options.data_dir = options.home_dir+'data/imagenet/'
elif 'cifar10' in model_name:
options.batch_size = 128
options.crop_size = 32
options.data_dir = options.home_dir+'data/CIFAR-10/'
if options.load_mode == 'normal':
options.backbone_model_path = None
return options
def get_data(options, dataset=None, model_name='gtsrb', phase='train'):
if dataset is None:
if 'gtsrb' == model_name:
import train_gtsrb
if 'test' in options.data_dir:
dataset = train_gtsrb.GTSRBTestDataset(options)
else:
dataset = train_gtsrb.GTSRBDataset(options)
elif 'resnet101' in model_name:
import train_megaface
dataset = train_megaface.MegaFaceDataset(options)
elif 'resnet50' == model_name:
import train_imagenet
dataset = train_imagenet.ImageNetDataset(options)
elif 'cifar10' in model_name:
import train_cifar10
dataset = train_cifar10.CifarDataset(options)
params = benchmark_cnn.make_params()
params = params._replace(batch_size=options.batch_size)
params = params._replace(model='MY_'+model_name)
params = params._replace(num_epochs=options.num_epochs)
params = params._replace(num_gpus=options.num_gpus)
params = params._replace(data_format='NHWC')
params = params._replace(allow_growth=True)
params = params._replace(use_tf_layers=False)
params = params._replace(forward_only=True)
params = benchmark_cnn.setup(params)
model = Model_Builder(model_name, dataset.num_classes, options, params)
is_train = (phase=='train')
p_class = dataset.get_input_preprocessor()
preprocessor = p_class(options.batch_size,
model.get_input_shapes(phase),
options.batch_size,
model.data_type,
is_train,
distortions=params.distortions,
resize_method='bilinear')
ds = preprocessor.create_dataset(batch_size=options.batch_size,
num_splits=1,
batch_size_per_split=options.batch_size,
dataset=dataset,
subset=phase,
train=is_train,
#datasets_repeat_cached_sample = params.datasets_repeat_cached_sample)
datasets_repeat_cached_sample = False)
ds_iter = preprocessor.create_iterator(ds)
input_list = ds_iter.get_next()
return model, dataset, input_list
def get_output(options, dataset=None, model_name='gtsrb'):
model, dataset, input_list = get_data(options, dataset, model_name, options.data_subset)
print('==================Input================')
print(input_list)
feed_list = None
if feed_input_by_dict(options, model_name):
img_holder = tf.placeholder(tf.float32,[options.batch_size,options.crop_size,options.crop_size,3],'input_image')
lb_holder = tf.placeholder(tf.int32,[options.batch_size,1],'input_label')
feed_list = (img_holder, lb_holder)
with tf.variable_scope('v0'):
bld_rst = model.build_network(feed_list,phase_train=False,nclass=dataset.num_classes)
else:
with tf.variable_scope('v0'):
bld_rst = model.build_network(input_list,phase_train=False,nclass=dataset.num_classes)
return model, dataset, input_list, feed_list, bld_rst.logits, bld_rst.extra_info
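# The helper below appears to build SentiNet-style probe inputs: for each of the
# first 100 images in a_matrix it samples 100 partner images from b_matrix, pastes
# the corner patch of the 'a' image into each partner (labelled with the 'a'
# label), and also emits a copy with that patch strongly attenuated (labelled with
# the partner's own label). The patch location depends on whether 'a' is treated
# as infected or intact.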
def generate_sentinet_inputs(a_matrix, a_labels, b_matrix, b_labels, a_is='infected'):
n_intact = b_matrix.shape[0]
width = b_matrix.shape[1]
if a_is=='infected' :
st_cd = width - width//4
ed_cd = width
elif a_is == 'intact':
st_cd = width // 6
ed_cd = width-st_cd
ret_matrix = []
ret_labels = []
idx = list(range(n_intact))
for i in range(100):
a_im = a_matrix[i]
j_list = random.sample(idx, 100)
for j in j_list:
b_im = b_matrix[j].copy()
b_im[st_cd:ed_cd,st_cd:ed_cd,:] = a_im[st_cd:ed_cd, st_cd:ed_cd,:]
ret_matrix.append(b_im)
ret_labels.append(a_labels[i])
b_im = b_im.copy()
b_im[st_cd:ed_cd,st_cd:ed_cd,:] *= 0.1
ret_matrix.append(b_im)
ret_labels.append(b_labels[j])
return np.asarray(ret_matrix), np.asarray(ret_labels)
def test_blended_input(options, model_name='gtsrb'):
options = justify_options_for_model(options,model_name)
options.shuffle= True
options.batch_size = 100
options.num_epochs = 1
options.net_mode = 'normal'
options.data_mode = 'poison_only'
options.load_mode = 'all'
options.fix_level = 'all'
options.build_level = 'logits'
options.poison_fraction = 1
options.poison_subject_labels = [[1]]
options.poison_object_label = [0]
options.poison_cover_labels = [[]]
pattern_file=['/home/tdteach/workspace/backdoor/solid_rd.png']
options.poison_pattern_file = pattern_file
options.selected_training_labels = [1]
model, dataset, input_list = get_data(options,model_name=model_name)
img_op, label_op = input_list
run_iters = np.ceil(dataset.num_examples_per_epoch()/options.batch_size)
run_iters = int(np.ceil(100/options.batch_size))
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
a_ims = None
a_lbs = None
init_op = tf.global_variables_initializer()
local_var_init_op = tf.local_variables_initializer()
table_init_ops = tf.tables_initializer()  # iterator initializer in here
with tf.Session(config=config) as sess:
sess.run(init_op)
sess.run(local_var_init_op)
sess.run(table_init_ops)
for i in range(run_iters):
images, labels = sess.run([img_op, label_op])
if a_ims is None:
a_ims = images
a_lbs = labels
else:
a_ims = np.concatenate((a_ims, images))
a_lbs = np.concatenate((a_lbs, labels))
n_data = a_ims.shape[0]
print(n_data)
options.selected_training_labels = list(range(15,43))
options.data_mode = 'normal'
model, dataset, input_list = get_data(options,model_name=model_name)
b_ims = None
b_lbs = None
img_op, label_op = input_list
init_op = tf.global_variables_initializer()
local_var_init_op = tf.local_variables_initializer()
table_init_ops = tf.tables_initializer()  # iterator initializer in here
with tf.Session(config=config) as sess:
sess.run(init_op)
sess.run(local_var_init_op)
sess.run(table_init_ops)
for i in range(run_iters):
images, labels = sess.run([img_op, label_op])
if b_ims is None:
b_ims = images
b_lbs = labels
else:
b_ims = np.concatenate((b_ims, images))
b_lbs = np.concatenate((b_lbs, labels))
in_ims, in_lbs = generate_sentinet_inputs(a_ims, a_lbs, b_ims,b_lbs, a_is='infected')
t_ims, t_lbs = generate_sentinet_inputs(b_ims, b_lbs, b_ims,b_lbs, a_is='intact')
in_ims = np.concatenate((in_ims, t_ims))
in_lbs = np.concatenate((in_lbs, t_lbs))
t_ims, t_lbs = generate_sentinet_inputs(b_ims, b_lbs, b_ims,b_lbs, a_is='infected')
in_ims = np.concatenate((in_ims, t_ims))
in_lbs = np.concatenate((in_lbs, t_lbs))
print(in_ims.shape)
#a_matrix = im_matrix[0:1000,:,:,:]
#b_matrix = im_matrix[-1000:,:,:,:]
#c_matrix = im_matrix[1000:2000,:,:,:]
#d_matrix = im_matrix[-2000:-1000,:,:,:]
#wedge_im = (a_matrix+b_matrix)/2
#wedge_lb = -1*np.ones([1000],dtype=np.int32)
#im_matrix = np.concatenate((im_matrix, wedge_im))
#lb_matrix = np.concatenate((lb_matrix, wedge_lb))
#wedge_im = (a_matrix+d_matrix)/2
#wedge_lb = -1*np.ones([1000],dtype=np.int32)
#im_matrix = np.concatenate((im_matrix, wedge_im))
#lb_matrix = np.concatenate((lb_matrix, wedge_lb))
#wedge_im = (c_matrix+b_matrix)/2
#wedge_lb = -1*np.ones([1000],dtype=np.int32)
#im_matrix = np.concatenate((im_matrix, wedge_im))
#lb_matrix = np.concatenate((lb_matrix, wedge_lb))
#wedge_im = (c_matrix+d_matrix)/2
#wedge_lb = -1*np.ones([1000],dtype=np.int32)
#im_matrix = np.concatenate((im_matrix, wedge_im))
#lb_matrix = np.concatenate((lb_matrix, wedge_lb))
#
#for i in range(9):
# wedge_im = a_matrix*0.1*(i+1)+d_matrix*0.1*(10-i-1)
# wedge_lb = -1*np.ones([1000],dtype=np.int32)
# im_matrix = np.concatenate((im_matrix, wedge_im))
# lb_matrix = np.concatenate((lb_matrix, wedge_lb))
def __set_shape(imgs, labels):
imgs.set_shape([options.batch_size,options.crop_size,options.crop_size,3])
labels.set_shape([options.batch_size])
return imgs, labels
n_data = in_ims.shape[0]
run_iters = int(np.ceil(n_data/options.batch_size))
dataset = tf.data.Dataset.from_tensor_slices((in_ims, in_lbs))
dataset = dataset.batch(options.batch_size)
dataset = dataset.map(__set_shape)
dataset = dataset.repeat()
print(dataset.output_types)
print(dataset.output_shapes)
iter = dataset.make_one_shot_iterator()
next_element = iter.get_next()
with tf.variable_scope('v0'):
bld_rst = model.build_network(next_element,phase_train=False,nclass=43)
model.add_backbone_saver()
logits_op, extar_logits_op = bld_rst.logits, bld_rst.extra_info
out_logits = None
out_labels = None
img_op, label_op = next_element
init_op = tf.global_variables_initializer()
local_var_init_op = tf.local_variables_initializer()
table_init_ops = tf.tables_initializer()  # iterator initializer in here
with tf.Session(config=config) as sess:
sess.run(init_op)
sess.run(local_var_init_op)
sess.run(table_init_ops)
model.load_backbone_model(sess, options.backbone_model_path)
for i in range(run_iters):
logits, labels = sess.run([logits_op, label_op])
pds = np.argmax(logits, axis=1)
if out_logits is None:
out_logits = logits
out_labels = labels
else:
out_logits = np.concatenate((out_logits, logits))
out_labels = np.concatenate((out_labels, labels))
print('===Results===')
np.save('out_X.npy', out_logits)
print('write logits to out_X.npy')
np.save('out_labels.npy', out_labels)
print('write labels to out_labels.npy')
def test_poison_performance(options, model_name):
options.net_mode = 'normal'
if 'colorful' in options.data_mode:
options.data_mode = 'poison_only_colorful'
else:
options.data_mode = 'poison_only'
options.load_mode = 'bottom_affine'
options.poison_fraction = 1
subject_labels = options.poison_subject_labels
if subject_labels is not None:
sl = []
for s in subject_labels:
if s is not None:
sl.extend(s)
if len(sl) > 0:
options.selected_training_labels = sl
else:
options.selected_training_labels = None
options.gen_ori_label = True
else:
options.gen_ori_label = False
return _performance_test(options, model_name)
def test_mask_efficiency(options, global_label, model_name, selected_labels=None):
options.net_mode = 'backdoor_def'
options.data_mode = 'global_label'
options.global_label = global_label
options.load_mode = 'all'
options.selected_training_labels = selected_labels
options.data_subset = 'validation'
options.gen_ori_label = False
return _performance_test(options, model_name)
def test_performance(options, model_name, selected_labels=None):
options.net_mode = 'normal'
options.data_mode = 'normal'
options.poison_fraction = 0
options.load_mode = 'bottom_affine'
options.selected_training_labels = selected_labels
options.gen_ori_label = False
return _performance_test(options, model_name)
def _performance_test(options, model_name):
options.data_subset = 'validation'
options = justify_options_for_model(options,model_name)
options.shuffle = False
options.build_level = 'logits'
options.fix_level = 'all'
options.optimizer = 'sgd'
options.num_epochs = 1
dataset = None
model, dataset, input_list, feed_list, out_op, aux_out_op = get_output(options,dataset=dataset,model_name=model_name)
model.add_backbone_saver()
im_op = input_list[0]
lb_op = input_list[1]
buf = None
acc = 0
t_e = 0
cur_iters = 0
run_iters = math.ceil(dataset.num_examples_per_epoch(options.data_subset)/options.batch_size)
if feed_list is not None:
run_iters = min(10, run_iters)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
init_op = tf.global_variables_initializer()
local_var_init_op = tf.local_variables_initializer()
table_init_ops = tf.tables_initializer()  # iterator initializer in here
with tf.Session(config=config) as sess:
sess.run(init_op)
sess.run(local_var_init_op)
sess.run(table_init_ops)
model.load_backbone_model(sess, options.backbone_model_path)
while cur_iters < run_iters:
if run_iters <= 10:
print(cur_iters)
elif (cur_iters%10 == 0):
print(cur_iters)
if feed_list is not None:
feed_data, buf, cur_iters | |
# -*- coding: utf-8 -*-
"""
Usage :
> python bsgs_dll_secp256k1.py -p <KEY> -b 2B_bPfile.bin -bl 2B_bloom.bin -n 500000000000000 -keyspace 800000000000000000000000000000:FFFFFFFFFFFFFFFFFFFFFFFFFFFFFF -rand
> python bsgs_dll_secp256k1.py -p <KEY> -b 2B_bPfile.bin -bl 2B_bloom.bin -n 500000000000000 -rand1
@author: iceland
@credits: Alberto, Keyhunt gmp library
"""
import time
import random
import bit
import os
import ctypes
import math
import sys
import platform
import argparse
parser = argparse.ArgumentParser(description='This tool use bsgs algo for sequentially searching 1 pubkey in the given range using 1 cpu',
epilog='Enjoy the program! :) Tips BTC: bc1q39meky2mn5qjq704zz0nnkl0v7kj4uz6r529at \
\nThanks a lot to AlbertoBSD Tips BTC: 1ABSD1rMTmNZHJrJP8AJhDNG1XbQjWcRz7')
parser.version = '13072021'
parser.add_argument("-p", "--pubkey", help = "Public Key in hex format (compressed or uncompressed)", required=True)
parser.add_argument("-b", "--bpfile", help = "Baby Point file. created using create_bPfile_mcpu2.py", required=True)
parser.add_argument("-bl", "--bloomfile", help = "Bloom filter file. created using bPfile_2_bloom_dll_batch.py", required=True)
parser.add_argument("-n", help = "Total sequential search in 1 loop. default=50000000000000", action='store')
parser.add_argument("-keyspace", help = "Keyspace Range ( hex ) to search from min:max. default=1:order of curve", action='store')
parser.add_argument("-rand", help = "Start from a random value in the given range from min:max and search n values then again take a new random", action="store_true")
parser.add_argument("-rand1", help = "First Start from a random value, then go fully sequential, in the given range from min:max", action="store_true")
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
seq = int(args.n) if args.n else 50000000000000
ss = args.keyspace if args.keyspace else '1:FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364140'
flag_random = True if args.rand else False
flag_random1 = True if args.rand1 else False
bs_file = args.bpfile # 'FULLbpfile.bin'
bloom_file = args.bloomfile # 'Big_dll_bloom.bin'
public_key = args.pubkey # '<KEY>'
if flag_random1: flag_random = True
###############################################################################
a, b = ss.split(':')
a = int(a, 16)
b = int(b, 16)
if not os.path.isfile(bloom_file):
print('File {} not found'.format(bloom_file))
print('create it from : bPfile_2_bloom_dll.py')
sys.exit()
if not os.path.isfile(bs_file):
print('File {} not found'.format(bs_file))
print('Specify the file used to create the bloom filter or create it from: create_bPfile_mcpu.py. Even a slightly smaller file is OK.')
sys.exit()
# ======== 1st Part : File ===============
# N2 = 0X7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0
N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364140
# m = 40000000 # m = math.floor(math.sqrt(k2-k1))
m_bb = int((os.stat(bs_file).st_size)/32) # each xpoint is 32 bytes in the file
lastitem = 0
if platform.system().lower().startswith('win'):
pathdll = os.path.realpath('bloom_batch.dll')
mylib = ctypes.CDLL(pathdll)
elif platform.system().lower().startswith('lin'):
pathdll = os.path.realpath('bloom_batch.so')
mylib = ctypes.CDLL(pathdll)
else:
print('[-] Unsupported platform for the ctypes dll method. Only Windows and Linux are supported.')
sys.exit()
bloom_check_add = mylib.bloom_check_add
bloom_check_add.restype = ctypes.c_int
bloom_check_add.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_ulonglong, ctypes.c_ubyte, ctypes.c_char_p]
###############################################################################
if platform.system().lower().startswith('win'):
pathdll = os.path.realpath('ice_secp256k1.dll')
ice = ctypes.CDLL(pathdll)
elif platform.system().lower().startswith('lin'):
pathdll = os.path.realpath('ice_secp256k1.so')
ice = ctypes.CDLL(pathdll)
else:
print('[-] Unsupported platform for the ctypes dll method. Only Windows and Linux are supported.')
sys.exit()
ice.scalar_multiplication.argtypes = [ctypes.c_char_p, ctypes.c_char_p] # pvk,ret
# ice.point_increment.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p] # x,y,ret
ice.point_negation.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p] # x,y,ret
# ice.point_doubling.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p] # x,y,ret
# ice.hash_to_address.argtypes = [ctypes.c_int, ctypes.c_bool, ctypes.c_char_p] # 012,comp,hash
# ice.hash_to_address.restype = ctypes.c_char_p
# ice.pubkey_to_address.argtypes = [ctypes.c_int, ctypes.c_bool, ctypes.c_char_p, ctypes.c_char_p] # 012,comp,x,y
# ice.pubkey_to_address.restype = ctypes.c_char_p
# ice.create_baby_table.argtypes = [ctypes.c_ulonglong, ctypes.c_ulonglong, ctypes.c_char_p] # start,end,ret
ice.point_addition.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p] # x1,y1,x2,y2,ret
ice.point_subtraction.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p] # x1,y1,x2,y2,ret
ice.point_loop_subtraction.argtypes = [ctypes.c_ulonglong, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p] # k,x1,y1,x2,y2,ret
ice.init_secp256_lib()
def scalar_multiplication(kk):
res = (b'\x00') * 65
pass_int_value = hex(kk)[2:].encode('utf8')
ice.scalar_multiplication(pass_int_value, res)
return res
# =============================================================================
# def point_increment(pubkey_bytes):
# x1 = pubkey_bytes[1:33]
# y1 = pubkey_bytes[33:]
# res = (b'\x00') * 65
# ice.point_increment(x1, y1, res)
# return res
# =============================================================================
def point_negation(pubkey_bytes):
x1 = pubkey_bytes[1:33]
y1 = pubkey_bytes[33:]
res = (b'\x00') * 65
ice.point_negation(x1, y1, res)
return res
# =============================================================================
# def hash_to_address(addr_type, iscompressed, hash160_bytes):
# # type = 0 [p2pkh], 1 [p2sh], 2 [bech32]
# res = ice.pubkey_to_address(addr_type, iscompressed, hash160_bytes)
# return res.decode('utf8')
#
# def pubkey_to_address(addr_type, iscompressed, pubkey_bytes):
# # type = 0 [p2pkh], 1 [p2sh], 2 [bech32]
# x1 = pubkey_bytes[1:33]
# y1 = pubkey_bytes[33:]
# res = ice.pubkey_to_address(addr_type, iscompressed, x1, y1)
# return res.decode('utf8')
#
# def create_baby_table(start_value, end_value):
# res = (b'\x00') * ((1+end_value-start_value) * 32)
# ice.create_baby_table(start_value, end_value, res)
# return res
#
# def point_doubling(pubkey_bytes):
# x1 = pubkey_bytes[1:33]
# y1 = pubkey_bytes[33:]
# res = (b'\x00') * 65
# ice.point_doubling(x1, y1, res)
# return res
# =============================================================================
def point_addition(pubkey1_bytes, pubkey2_bytes):
x1 = pubkey1_bytes[1:33]
y1 = pubkey1_bytes[33:]
x2 = pubkey2_bytes[1:33]
y2 = pubkey2_bytes[33:]
res = (b'\x00') * 65
ice.point_addition(x1, y1, x2, y2, res)
return res
def point_subtraction(pubkey1_bytes, pubkey2_bytes):
x1 = pubkey1_bytes[1:33]
y1 = pubkey1_bytes[33:]
x2 = pubkey2_bytes[1:33]
y2 = pubkey2_bytes[33:]
res = (b'\x00') * 65
ice.point_subtraction(x1, y1, x2, y2, res)
return res
def point_loop_subtraction(num, pubkey1_bytes, pubkey2_bytes):
x1 = pubkey1_bytes[1:33]
y1 = pubkey1_bytes[33:]
x2 = pubkey2_bytes[1:33]
y2 = pubkey2_bytes[33:]
res = (b'\x00') * (65 * num)
ice.point_loop_subtraction(num, x1, y1, x2, y2, res)
return res
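# Each 65-byte slice of the returned buffer appears to hold one uncompressed point
# (0x04 || x || y); the search loop below indexes it as res[pos + 1 : pos + 33] to
# pull out the 32-byte x coordinate of the successive results of repeatedly
# subtracting pubkey2 from pubkey1.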
###############################################################################
def randk(a, b):
if flag_random:
random.seed(random.randint(1,2**256))
return random.SystemRandom().randint(a, b)
else:
if lastitem == 0:
return a
elif lastitem > b:
print('[+] Range Finished')
exit()
else:
return lastitem + 1
def scan_str(num):
# Kilo/Mega/Giga/Tera/Peta/Exa/Zetta/Yotta
dict_suffix = {0:'', 1:'Thousands', 2:'Million', 3:'Billion', 4:'Trillion'}
num *= 1.0
idx = 0
for ii in range(4):
if int(num/1000) > 0:
idx += 1
num /= 1000
return ('%.5f '%num)+dict_suffix[idx]
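# e.g. scan_str(1234567) -> '1.23457 Million' (hypothetical value, shown for clarity)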
# =============================================================================
# def pub2point(pub_hex):
# x = int(pub_hex[2:66],16)
# if len(pub_hex) < 70:
# y = bit.format.x_to_y(x, int(pub_hex[:2],16)%2)
# else:
# y = int(pub_hex[66:], 16)
# return ec.Point(x, y)
# =============================================================================
def pub2upub(pub_hex):
x = int(pub_hex[2:66],16)
if len(pub_hex) < 70:
y = bit.format.x_to_y(x, int(pub_hex[:2],16)%2)
else:
y = int(pub_hex[66:],16)
return bytes.fromhex('04'+ hex(x)[2:].zfill(64) + hex(y)[2:].zfill(64))
# =============================================================================
# def sym_point(this_point):
# # find the symmetrical point from Order of the Curve
# parity = 0 if this_point.y % 2 == 1 else 1 # flip the parity to get other Point
# other_y = bit.format.x_to_y(this_point.x, parity)
# return ec.Point(this_point.x, other_y)
# =============================================================================
def read_FULL_baby_file(num_bytes):
# a = array('B')
# elem = int((os.stat(bs_file).st_size)/3)
with open(bs_file,'rb') as f:
a = bytes(f.read(num_bytes))
# a.fromfile(f, elem)
return a
def bloom_read_dll_from_file():
with open(bloom_file,'rb') as f:
ba_bloom2 = bytes(f.read())
# ba_bloom2 = bytes( bytearray(f.read()) )
return ba_bloom2
###############################################################################
st = time.time()
print('\n[+] Starting Program : BSGS mode Version [', parser.version,']')
# Q = pub2point(public_key)
Q = bytes(bytearray(pub2upub(public_key)))
print('[+] Search Started for the Public key: ',Q.hex())
# print(Q)
# Sym_Q = sym_point(Q)
if flag_random1 == True:
print('[+] Search Mode: Random Start then Fully sequential from it')
elif flag_random == True:
print('[+] Search Mode: Random Start after every n sequential key search')
else:
print('[+] Search Mode: Sequential search in the given range')
bloom_filter = bloom_read_dll_from_file()
print('[+] Reading bloom filter from file complete in : {0:.5f} sec'.format(time.time() - st))
st = time.time()
# =============================================================================
bloom_bits = len(bloom_filter)*8 # 862655256 # fix value. Dont change
bloom_hashes = 30 # fix value. Dont change
bloom_prob = 0.000000001 # False Positive = 1 out of 1 billion
bloom_bpe = -(math.log(bloom_prob) / 0.4804530139182014)
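# (bits-per-element for a Bloom filter with false-positive rate p is
#  -ln(p) / (ln 2)^2; the constant 0.4804530139182014 is (ln 2)^2)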
m = math.floor(bloom_bits/bloom_bpe)
# =============================================================================
w = math.ceil(math.sqrt(m)) # secondary table elements needed
#if w*100 < m_bb:
# w = w*100 # use 10x elements from bP table. For faster 2nd round of search.
if w > m_bb:
print('[*] Warning: small bPfile found; the 2nd check will be slow. Proceeding...')
w = m_bb
# =============================================================================
#bloom_filter_gmp = gmpy2.xmpz(int.from_bytes(bloom_filter.tobytes(), byteorder='big'))
#del bloom_filter
# =============================================================================
baby_bin = read_FULL_baby_file(w*32)
print('[+] Reading Baby table from file complete in : {0:.5f} sec'.format(time.time() - st))
st = time.time()
baby_dict = {sys.intern(baby_bin[i*32:i*32+32].hex()):i for i in range(w)}
# baby_steps = [baby_bin[cnt*32:cnt*32+32].hex() for cnt in range(w)]
# baby_steps = {int(line,10):k for k, line in enumerate(baby_steps)}
# baby_steps = set(baby_steps)
# =============================================================================
# We have to solve P = k.G, we know that k lies in the range ]k1,k2]
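# Baby-step giant-step idea used here: a large table of baby points i*G (queried
# through the bloom filter / bP file) is combined with giant steps that repeatedly
# subtract a fixed multiple of G from P - k1*G; when an intermediate point matches
# a baby point at index i, the private key is reconstructed from the number of
# giant steps taken plus i, and the candidate is re-verified with a scalar
# multiplication before being reported.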
k1 = randk(a, b) # start from
k2 = k1 + seq
# Reset the flag after getting 1st Random Start Key
if flag_random1 == True: flag_random = False
print('[+] seq value:',seq,' m value :' , m)
print('[+] Search Range:',hex(a),' to ', hex(b))
###############################################################################
print(' [+] k1:', hex(k1))
k1G = bytes(bytearray(scalar_multiplication(k1)))
mG = bytes(bytearray(scalar_multiplication(m)))
mGneg = bytes(bytearray(point_negation(mG)))
wG = bytes(bytearray(scalar_multiplication(w)))
st = time.time()
###############################################################################
def bsgs_exact_key(pubkey_point, z1, z2):
z1G = bytes(bytearray(scalar_multiplication(z1)))
if z1G == pubkey_point:
print('============== KEYFOUND ==============')
print('BSGS FOUND PrivateKey ', hex(z1))
print('======================================')
exit()
S = bytes(bytearray(point_subtraction(pubkey_point, z1G)))
S2_list = bytes(bytearray(point_loop_subtraction( 1+(k2-k1)//m, S, wG )))
curr_byte_pos = 0
hex_line = bytes(bytearray(S[1:33]))
stp = 0
# print('[*] Bloom collision... Checking inside the bPfile Now... ')
while stp<(1+z2-z1):
# hex_line = bytes(bytearray(S[1:33])) # bytes case # S[2:66] for string case
# idx = baby_bin.find(bytes.fromhex(hex_line), 0)
# idx = baby_bin.find(hex_line, 0)
idx = baby_dict.get(hex_line.hex())
if idx is not None and idx >= 0:
# kk = z1+stp+int(idx/32)+1
kk = z1+stp+idx+1
# print('[*] Bloom collision and bPfile collision... Final check for the key', hex(kk))
if bytes(bytearray(scalar_multiplication(kk))) == pubkey_point:
print('============== KEYFOUND ==============')
print('BSGS FOUND PrivateKey ',hex(kk))
print('======================================')
exit()
else:
# S = bytes(bytearray(point_subtraction(S, wG)))
stp = stp + w
else:
# Giant step
# S = bytes(bytearray(point_subtraction(S, wG)))
stp = stp + w
hex_line = bytes(bytearray(S2_list[curr_byte_pos+1:curr_byte_pos+33]))
curr_byte_pos += 65
print('[-] A false collision ignored: bloom collision, but no bPfile collision.')
#################################
def bsgs_keys(pubkey_point, k1, k2):
found = False
if pubkey_point == k1G:
| |
import numpy as np
import sys
import time
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import GroupKFold
from sklearn.base import BaseEstimator
from scipy.linalg import cholesky, solve_triangular
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.decomposition import PCA
from ml_dft.kernel_functions import RBFKernel, MaternKernel
import os
import warnings
def get_alpha_add(n_basis, n_grid, delta, v):
alpha_add = np.pi * ((np.arange(n_basis / 2) / (n_grid * delta))**2 + v**2) / v
alpha_add = np.repeat(alpha_add, 2)
return alpha_add
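# Each value corresponds to one basis function (repeated for the paired component)
# and grows with the basis index via the frequency-like term squared; in the fit
# loop below the kernel diagonal is incremented by alpha * alpha_add[y_i], so this
# acts as a per-output-dimension scaling of the ridge term.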
class MultivariateGaussianProcessCV(BaseEstimator):
def __init__(self, krr_param_grid=None, cv_type=None, cv_nfolds=5, cv_groups=None,
cv_shuffles=1, n_components=None, single_combo=True,
verbose=0, copy_X=True, v=None, n_basis=None, n_grid=None, delta=None,
id=1, cleanup=True, kernel=None, squared_dist=False, kernel_params=None,
delta_learning=False, mae=False, replace_fit=True):
self.krr_param_grid = krr_param_grid
self.verbose = verbose
self.cv_nfolds = cv_nfolds
self.cv_type = cv_type
self.cv_groups = cv_groups
self.cv_shuffles = cv_shuffles
self.n_components = n_components
self.single_combo = single_combo
self.copy_X = copy_X
self.n_grid = n_grid
self.delta = delta
self.n_basis = n_basis
self.id = id
self.cleanup = cleanup
self.kernel = kernel
self.squared_dist = squared_dist
self.device = None
self.replace_fit = replace_fit
self.delta_learning = delta_learning
self.mae = mae
if self.kernel is None:
self.kernel = RBFKernel()
elif self.kernel == 'rbf':
self.kernel = RBFKernel(**kernel_params)
elif self.kernel == 'matern':
self.kernel = MaternKernel(**kernel_params)
if self.krr_param_grid is not None and 'v' in self.krr_param_grid and not single_combo:
raise ValueError('Can only add to alpha if single_combo=True')
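# A hypothetical hyperparameter grid for illustration (the keys below are the
# ones this class actually reads; the values are made up):
#   krr_param_grid = {
#       'gamma':  np.logspace(-6, 0, 7),     # kernel width parameter
#       'alpha':  np.logspace(-12, -3, 10),  # ridge regularisation
#       'lambda': [0],                       # importance-weight exponent
#       # 'v': [...]                         # only together with single_combo=True
#   }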
def score(self, y_true, y_pred):
return np.mean((y_true - y_pred) ** 2)
def fit(self, X, y, labels=None, dist=None, importance_weights=None, cv_indices=None,
dist_savename=None):
t = time.time()
if y.ndim < 2:
y = y.reshape(-1, 1)
if self.n_components is not None:
if self.verbose > 0:
elapsed = time.time() - t
print('PCA [%dmin %dsec]' % (int(elapsed / 60),
int(elapsed % 60)))
sys.stdout.flush()
self.pca = PCA(n_components=self.n_components, svd_solver='arpack')
y_ = self.pca.fit_transform(y)
if self.verbose > 0:
elapsed = time.time() - t
print('Lost %.1f%% information ' % (self.pca.noise_variance_) +
'[%dmin %dsec]' % (int(elapsed / 60), int(elapsed % 60)))
else:
y_ = y
if labels is not None:
raise RuntimeError('Not implemented.')
if cv_indices is None:
cv_indices = np.arange(X.shape[0])
if self.cv_type is None:
kfold = RepeatedKFold(n_splits=self.cv_nfolds, n_repeats=self.cv_shuffles)
cv_folds = kfold.split(X[cv_indices])
n_cv_folds = kfold.get_n_splits()
elif self.cv_type == 'iter':
cv_folds = self.cv_groups
n_cv_folds = len(self.cv_groups)
elif self.cv_type == 'group':
groups = self.cv_groups
if self.cv_nfolds is None:
self.cv_nfolds = len(np.unique(groups))
kfold = GroupKFold(n_splits=self.cv_nfolds)
cv_folds = kfold.split(X[cv_indices], y[cv_indices], groups)
n_cv_folds = kfold.get_n_splits()
else:
raise Exception('Cross-validation type not supported')
add_train_inds = np.setdiff1d(np.arange(X.shape[0]), cv_indices)
cv_folds = list(cv_folds)
cv_folds = [(np.concatenate((train_fold, add_train_inds)), test_fold) for train_fold, test_fold in cv_folds]
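# Rows not listed in cv_indices are appended to the training side of every
# fold, so cross-validation errors are only ever computed on cv_indices.
# (Note: the splitters return indices relative to X[cv_indices]; the code
# below uses them as absolute row indices, which coincides only when
# cv_indices is the default arange over all rows.)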
if self.verbose > 0:
elapsed = time.time() - t
print('Computing distance matrix [%dmin %dsec]' % (
int(elapsed / 60), int(elapsed % 60)))
sys.stdout.flush()
if dist is None:
dist = euclidean_distances(X, None, squared=self.squared_dist)
if dist_savename is not None:
if self.verbose > 0:
print('Saving distance matrix to file:', dist_savename)
np.save(dist_savename, dist)
if importance_weights is None:
self.krr_param_grid['lambda'] = [0]
importance_weights = np.ones((X.shape[0], ))
importance_weights = importance_weights**(0.5)
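# Importance weights enter through their square root: below, iw = sqrt(w)**lambda
# rescales the kernel as diag(iw) K diag(iw) as well as the targets and dual
# coefficients, i.e. an importance-weighted kernel ridge regression in which
# the 'lambda' grid entry controls how strongly the weights are applied.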
errors = []
if 'v' in self.krr_param_grid:
for fold_i, (train_i, test_i) in enumerate(cv_folds):
fold_errors = np.empty((len(self.krr_param_grid['v']),
len(self.krr_param_grid['gamma']),
1,
len(self.krr_param_grid['alpha']), y_.shape[1]))
if self.verbose > 0:
elapsed = time.time() - t
print('CV %d of %d [%dmin %dsec]' % (fold_i + 1,
n_cv_folds,
int(elapsed / 60),
int(elapsed % 60)))
sys.stdout.flush()
for v_i, v in enumerate(self.krr_param_grid['v']):
for gamma_i, gamma in enumerate(self.krr_param_grid['gamma']):
for lamb_i, lamb in enumerate(self.krr_param_grid['lambda']):
iw = importance_weights**lamb
iw = iw[:, None]
K_train = self.kernel.apply_to_dist(dist[np.ix_(train_i, train_i)], gamma=gamma)
K_train *= np.outer(iw[train_i], iw[train_i])
K_test = self.kernel.apply_to_dist(dist[np.ix_(test_i, train_i)], gamma=gamma)
if self.verbose > 0:
sys.stdout.write('.')
sys.stdout.flush()
for alpha_i, alpha in enumerate(self.krr_param_grid['alpha']):
if self.verbose > 0:
sys.stdout.write(',')
sys.stdout.flush()
for y_i in np.arange(y_.shape[1]):
K_train_ = K_train.copy()
alpha_add = get_alpha_add(self.n_basis, self.n_grid, self.delta, v)
K_train_.flat[::K_train_.shape[0] + 1] += alpha * alpha_add[y_i]
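# Kernel ridge regression in dual form: solve (K + alpha * alpha_add[y_i] * I) c = y_train
# via a Cholesky factorisation K_reg = L L^T and two triangular solves, then
# predict with mean = K_test @ c. The factor alpha_add[y_i] scales the ridge
# term individually for each basis coefficient.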
try:
L_ = cholesky(K_train_, lower=True)
x = solve_triangular(L_, y_[train_i, y_i], lower=True)
dual_coef_ = solve_triangular(L_.T, x)
pred_mean = np.dot(K_test, dual_coef_)
if self.mae:
e = np.mean(np.abs(pred_mean - y_[test_i, y_i]), 0)
else:
e = np.mean((pred_mean - y_[test_i, y_i]) ** 2, 0)
except np.linalg.LinAlgError:
e = np.inf
fold_errors[v_i, gamma_i, 0, alpha_i, y_i] = e
if self.verbose > 0:
sys.stdout.write('\n')
sys.stdout.flush()
errors.append(fold_errors)
errors = np.array(errors)
errors = np.mean(errors, 0) # average over folds
else:
for fold_i, (train_i, test_i) in enumerate(cv_folds):
fold_errors = np.empty((len(self.krr_param_grid['gamma']),
len(self.krr_param_grid['lambda']),
len(self.krr_param_grid['alpha']), y_.shape[1]))
if self.verbose > 0:
elapsed = time.time() - t
print('CV %d of %d [%dmin %dsec]' % (fold_i + 1,
n_cv_folds,
int(elapsed / 60),
int(elapsed % 60)))
sys.stdout.flush()
for gamma_i, gamma in enumerate(self.krr_param_grid['gamma']):
if self.verbose > 0:
sys.stdout.write('.')
sys.stdout.flush()
for lamb_i, lamb in enumerate(self.krr_param_grid['lambda']):
iw = importance_weights**lamb
iw = iw[:, None]
K_train = self.kernel.apply_to_dist(dist[np.ix_(train_i, train_i)], gamma=gamma)
K_train *= np.outer(iw[train_i], iw[train_i])
K_test = self.kernel.apply_to_dist(dist[np.ix_(test_i, train_i)], gamma=gamma)
for alpha_i, alpha in enumerate(self.krr_param_grid['alpha']):
if self.verbose > 0:
sys.stdout.write(',')
sys.stdout.flush()
K_train_ = K_train.copy()
K_train_.flat[::K_train_.shape[0] + 1] += alpha
try:
L_ = cholesky(K_train_, lower=True)
x = solve_triangular(L_, iw[train_i] * y_[train_i], lower=True)
dual_coef_ = iw[train_i] * solve_triangular(L_.T, x)
pred_mean = np.dot(K_test, dual_coef_)
if self.mae:
e = np.mean(np.abs(pred_mean - y_[test_i]) * importance_weights[test_i, None]**2, 0)
else:
e = np.mean(((pred_mean - y_[test_i]) ** 2) * importance_weights[test_i, None]**2, 0)
except np.linalg.LinAlgError:
e = np.inf
fold_errors[gamma_i, lamb_i, alpha_i] = e
if self.verbose > 0:
sys.stdout.write('\n')
sys.stdout.flush()
errors.append(fold_errors)
errors = np.array(errors)
errors = np.mean(errors, 0) # average over folds
self.dual_coefs_ = np.empty((y_.shape[1], X.shape[0]))
self.alphas_ = np.empty(y_.shape[1])
self.lambdas_ = np.empty(y_.shape[1])
self.gammas_ = np.empty(y_.shape[1])
if self.verbose > 0:
elapsed = time.time() - t
print('Refit [%dmin %dsec]' % (int(elapsed / 60),
int(elapsed % 60)))
sys.stdout.flush()
print_count = 0
if not self.single_combo:
for i in range(y_.shape[1]):
min_params = np.argsort(errors[:, :, :, i], axis=None)
# lin_alg_errors = 0
gamma_i, lamb_i, alpha_i = np.unravel_index(min_params[0],
errors.shape[:3])
gamma = self.krr_param_grid['gamma'][gamma_i]
lamb = self.krr_param_grid['lambda'][lamb_i]
alpha = self.krr_param_grid['alpha'][alpha_i]
self.alphas_[i] = alpha
self.gammas_[i] = gamma
self.lambdas_[i] = lamb
if (gamma_i in (0, len(self.krr_param_grid['gamma']) - 1) or
lamb_i in (0, len(self.krr_param_grid['lambda']) - 1) or
alpha_i in (0, len(self.krr_param_grid['alpha']) - 1)):
if print_count <= 200:
fmtstr = '%d: gamma=%g\talpha=%g\tlambda=%g\terror=%g\tmean=%g'
print(fmtstr % (i, gamma, alpha, lamb,
errors[gamma_i, lamb_i, alpha_i, i],
errors[gamma_i, lamb_i, alpha_i, i] /
np.mean(np.abs(y_[:, i]))))
print_count += 1
else:
errors = np.mean(errors, -1) # average over outputs
if self.verbose > 1:
print('CV errors:')
print(errors)
print('Alpha params:')
print(self.krr_param_grid['alpha'])
print('Gamma params:')
print(self.krr_param_grid['gamma'])
print('Lambda params:')
print(self.krr_param_grid['lambda'])
if self.verbose > 0:
print('Min error: ', np.min(errors))
# print np.log(errors)
# plt.imshow(np.log(errors))
# plt.xticks(range(10), map('{:.1e}'.format, list(self.krr_param_grid['alpha'])))
# plt.yticks(range(10), map('{:.1e}'.format, list(self.krr_param_grid['gamma'])))
# plt.xlabel('alpha')
# plt.ylabel('gamma')
# plt.colorbar()
# plt.show()
min_params = np.argsort(errors, axis=None)
if 'v' in self.krr_param_grid:
v_i, gamma_i, lamb_i, alpha_i = np.unravel_index(min_params[0],
errors.shape)
else:
gamma_i, lamb_i, alpha_i = np.unravel_index(min_params[0],
errors.shape)
if 'v' in self.krr_param_grid:
v = self.krr_param_grid['v'][v_i]
print('v=', v)
gamma = self.krr_param_grid['gamma'][gamma_i]
alpha = self.krr_param_grid['alpha'][alpha_i]
lamb = self.krr_param_grid['lambda'][lamb_i]
if 'v' in self.krr_param_grid:
if v == self.krr_param_grid['v'][0]:
print('v at lower edge.')
if v == self.krr_param_grid['v'][-1]:
print('v at upper edge.')
if len(self.krr_param_grid['gamma']) > 1:
if gamma == self.krr_param_grid['gamma'][0]:
print('Gamma at lower edge.')
if gamma == self.krr_param_grid['gamma'][-1]:
print('Gamma at upper edge.')
if len(self.krr_param_grid['alpha']) > 1:
if alpha == self.krr_param_grid['alpha'][0]:
print('Alpha at lower edge.')
if alpha == self.krr_param_grid['alpha'][-1]:
print('Alpha at upper edge.')
if len(self.krr_param_grid['lambda']) > 1:
if lamb == self.krr_param_grid['lambda'][0]:
print('Lambda at lower edge.')
if lamb == self.krr_param_grid['lambda'][-1]:
print('Lambda at upper edge.')
self.alphas_[:] = alpha
self.gammas_[:] = gamma
self.lambdas_[:] = lamb
if 'v' in self.krr_param_grid:
alpha_add = get_alpha_add(self.n_basis, self.n_grid, self.delta, v)
self.alphas_ *= alpha_add
combos = list(zip(self.alphas_, self.gammas_, self.lambdas_))
n_unique_combos = len(set(combos))
self.L_fit_ = [None] * n_unique_combos
for i, (alpha, gamma, lamb) in enumerate(set(combos)):
if self.verbose > 0:
elapsed = time.time() - t
print('Parameter combinations ' +
'%d of %d [%dmin %dsec]' % (i + 1, n_unique_combos,
int(elapsed / 60),
int(elapsed % 60)))
sys.stdout.flush()
y_list = [i for i in range(y_.shape[1]) if
self.alphas_[i] == alpha and self.gammas_[i] == gamma and self.lambdas_[i] == lamb]
iw = importance_weights**lamb
iw = iw[:, None]
K = self.kernel.apply_to_dist(dist, gamma=gamma)
K *= np.outer(iw, iw)
# np.exp(K, K)
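# Refit retry loop: each pass adds alpha - alpha/10 to the diagonal; after a
# failed Cholesky factorisation alpha is multiplied by 10, so the retry tops
# the regularisation up to the new value (the very first pass therefore adds
# 0.9*alpha rather than alpha).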
while True:
K.flat[::K.shape[0] + 1] += alpha - (alpha / 10)
try:
if self.verbose > 0:
print('trying cholesky decomposition, alpha', alpha)
L_ = cholesky(K, lower=True)
self.L_fit_[i] = L_
x = solve_triangular(L_, iw * y_[:, y_list], lower=True)
# x = solve_triangular(L_, y_[:, y_list], lower=True)
dual_coef_ = solve_triangular(L_.T, x)
self.dual_coefs_[y_list] = iw.T * dual_coef_.T.copy()
break
except np.linalg.LinAlgError:
if self.verbose > 0:
print('LinalgError, increasing alpha')
alpha *= 10
self.alphas_[0] = alpha
if self.copy_X:
self.X_fit_ = X.copy()
self.y_fit_ = y.copy()
else:
self.X_fit_ = X
self.y_fit_ = y
self.errors = errors
if self.verbose > 0:
elapsed = time.time() - t
+ ['port:eth101/1/32'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/34'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/35'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/36'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/37'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/38'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/39'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/40'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/41'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/42'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/33'], hostname=hn102)
aggregator.assert_metric(metric_name, value=445946184.0, tags=tags102 + ['port:eth1/48'], hostname=hn102)
aggregator.assert_metric(metric_name, value=1195355049.0, tags=tags102 + ['port:eth1/49'], hostname=hn102)
aggregator.assert_metric(metric_name, value=1558331294.0, tags=tags102 + ['port:eth1/50'], hostname=hn102)
aggregator.assert_metric(metric_name, value=236470350.0, tags=tags102 + ['port:eth1/1'], hostname=hn102)
aggregator.assert_metric(metric_name, value=236534244.0, tags=tags102 + ['port:eth1/2'], hostname=hn102)
aggregator.assert_metric(metric_name, value=549032.0, tags=tags102 + ['port:eth1/3'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/4'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/15'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/5'], hostname=hn102)
aggregator.assert_metric(metric_name, value=127544.0, tags=tags102 + ['port:eth1/6'], hostname=hn102)
aggregator.assert_metric(metric_name, value=280084025.0, tags=tags102 + ['port:eth1/7'], hostname=hn102)
aggregator.assert_metric(metric_name, value=278551439.0, tags=tags102 + ['port:eth1/8'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/9'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/10'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/11'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/12'], hostname=hn102)
aggregator.assert_metric(metric_name, value=829143147.0, tags=tags202 + ['port:eth1/1'], hostname=hn202)
aggregator.assert_metric(metric_name, value=3015349024.0, tags=tags202 + ['port:eth1/2'], hostname=hn202)
aggregator.assert_metric(metric_name, value=2484948057.0, tags=tags201 + ['port:eth1/1'], hostname=hn201)
aggregator.assert_metric(metric_name, value=1005106879.0, tags=tags201 + ['port:eth1/2'], hostname=hn201)
metric_name = 'cisco_aci.fabric.port.egr_bytes.multicast.cum'
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/43'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/44'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/45'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/46'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/47'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=236383592.0, tags=tags101 + ['port:eth1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=236450478.0, tags=tags101 + ['port:eth1/2'], hostname=hn101)
aggregator.assert_metric(metric_name, value=547520.0, tags=tags101 + ['port:eth1/3'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/4'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/5'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/6'], hostname=hn101)
aggregator.assert_metric(metric_name, value=287182563.0, tags=tags101 + ['port:eth1/7'], hostname=hn101)
aggregator.assert_metric(metric_name, value=21312.0, tags=tags101 + ['port:eth1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=318246201.0, tags=tags101 + ['port:eth1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=18204.0, tags=tags101 + ['port:eth1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=4488.0, tags=tags101 + ['port:eth1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=236423309.0, tags=tags101 + ['port:eth1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=446236358.0, tags=tags101 + ['port:eth1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=1466364460.0, tags=tags101 + ['port:eth1/49'], hostname=hn101)
aggregator.assert_metric(metric_name, value=1133167391.0, tags=tags101 + ['port:eth1/50'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/2'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/3'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/4'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/5'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/6'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/7'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/13'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/14'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/16'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/17'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/18'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/20'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/21'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/22'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/23'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/24'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/25'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/26'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/27'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/28'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/29'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/30'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/31'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/32'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/34'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/35'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/36'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/37'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/38'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/39'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/40'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/41'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/42'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/33'], hostname=hn102)
aggregator.assert_metric(metric_name, value=445946184.0, tags=tags102 + ['port:eth1/48'], hostname=hn102)
aggregator.assert_metric(metric_name, value=1195355049.0, tags=tags102 + ['port:eth1/49'], hostname=hn102)
aggregator.assert_metric(metric_name, value=1558331294.0, tags=tags102 + ['port:eth1/50'], hostname=hn102)
aggregator.assert_metric(metric_name, value=236470350.0, tags=tags102 + ['port:eth1/1'], hostname=hn102)
aggregator.assert_metric(metric_name, value=236534244.0, tags=tags102 + ['port:eth1/2'], hostname=hn102)
aggregator.assert_metric(metric_name, value=549032.0, tags=tags102 + ['port:eth1/3'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/4'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/15'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/5'], hostname=hn102)
aggregator.assert_metric(metric_name, value=127544.0, tags=tags102 + ['port:eth1/6'], hostname=hn102)
aggregator.assert_metric(metric_name, value=280084025.0, tags=tags102 + ['port:eth1/7'], hostname=hn102)
aggregator.assert_metric(metric_name, value=278551439.0, tags=tags102 + ['port:eth1/8'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/9'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/10'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/11'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/12'], hostname=hn102)
aggregator.assert_metric(metric_name, value=829143147.0, tags=tags202 + ['port:eth1/1'], hostname=hn202)
aggregator.assert_metric(metric_name, value=3015349024.0, tags=tags202 + ['port:eth1/2'], hostname=hn202)
aggregator.assert_metric(metric_name, value=2484948057.0, tags=tags201 + ['port:eth1/1'], hostname=hn201)
aggregator.assert_metric(metric_name, value=1005106879.0, tags=tags201 + ['port:eth1/2'], hostname=hn201)
metric_name = 'cisco_aci.fabric.node.cpu.max'
aggregator.assert_metric(metric_name, value=3.6080520000000007, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=3.5515670000000057, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=3.6742420000000067, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=3.5998479999999944, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=3.612936000000005, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=3.564656999999997, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=3.603033999999994, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=3.617963000000003, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=3.7074400000000054, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=3.636823000000007, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=3.5678139999999985, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=3.635904999999994, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=3.336716999999993, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=3.4870499999999964, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=3.4300130000000024, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=3.385548, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=3.498800000000003, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=3.5096580000000017, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=3.5543229999999966, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=3.5371399999999937, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=3.547083999999998, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=3.388972999999993, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=3.5001259999999945, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=3.4704689999999943, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=12.135921999999994, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(metric_name, value=12.322153999999998, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(metric_name, value=11.833715999999995, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(metric_name, value=12.172141999999994, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(metric_name, value=12.037987999999999, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(metric_name, value=12.229298999999997, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(metric_name, value=12.168367000000003, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(metric_name, value=12.015305999999995, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(metric_name, value=12.280254999999997, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(metric_name, value=11.982625999999996, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(metric_name, value=12.034676000000005, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(metric_name, value=11.964286000000001, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(
metric_name,
value=7.0,
tags=[
'apic_role:controller',
'node_id:3',
'fabric_state:unknown',
'fabric_pod_id:1',
'cisco',
'project:cisco_aci',
],
hostname='pod-1-node-3',
)
aggregator.assert_metric(
metric_name,
value=14.0,
tags=[
'apic_role:controller',
'node_id:1',
'fabric_state:unknown',
'fabric_pod_id:1',
'cisco',
'project:cisco_aci',
],
hostname='pod-1-node-1',
)
aggregator.assert_metric(metric_name, value=11.881692999999999, tags=tagsspine201, hostname=hn201)
aggregator.assert_metric(metric_name, value=12.369293999999996, tags=tagsspine201, hostname=hn201)
aggregator.assert_metric(metric_name, value=11.892583000000002, tags=tagsspine201, hostname=hn201)
aggregator.assert_metric(metric_name, value=11.806616000000005, tags=tagsspine201, hostname=hn201)
aggregator.assert_metric(metric_name, value=11.635621, tags=tagsspine201, hostname=hn201)
aggregator.assert_metric(metric_name, value=11.815635, tags=tagsspine201, hostname=hn201)
aggregator.assert_metric(metric_name, value=11.728710000000007, tags=tagsspine201, hostname=hn201)
aggregator.assert_metric(metric_name, value=12.096569000000002, tags=tagsspine201, hostname=hn201)
aggregator.assert_metric(metric_name, value=11.489253000000005, tags=tagsspine201, hostname=hn201)
aggregator.assert_metric(metric_name, value=11.755685999999997, tags=tagsspine201, hostname=hn201)
aggregator.assert_metric(metric_name, value=11.997952999999995, tags=tagsspine201, hostname=hn201)
aggregator.assert_metric(metric_name, value=12.017387, tags=tagsspine201, hostname=hn201)
aggregator.assert_metric(
metric_name,
value=11.0,
tags=[
'apic_role:controller',
'node_id:2',
'fabric_state:unknown',
'fabric_pod_id:1',
'cisco',
'project:cisco_aci',
],
hostname='pod-1-node-2',
)
metric_name = 'cisco_aci.fabric.node.mem.avg'
aggregator.assert_metric(metric_name, value=10559963.0, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=10491187.0, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=10747828.0, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(
metric_name,
value=37859173.0,
tags=[
'apic_role:controller',
'node_id:3',
'fabric_state:unknown',
'fabric_pod_id:1',
'cisco',
'project:cisco_aci',
],
hostname='pod-1-node-3',
)
aggregator.assert_metric(
metric_name,
value=43008145.0,
tags=[
'apic_role:controller',
'node_id:1',
'fabric_state:unknown',
'fabric_pod_id:1',
'cisco',
'project:cisco_aci',
],
hostname='pod-1-node-1',
)
aggregator.assert_metric(metric_name, value=10814699.0, tags=tagsspine201, hostname=hn201)
aggregator.assert_metric(
metric_name,
value=34463186.0,
tags=[
'apic_role:controller',
'node_id:2',
'fabric_state:unknown',
'fabric_pod_id:1',
'cisco',
'project:cisco_aci',
],
hostname='pod-1-node-2',
)
metric_name = 'cisco_aci.fabric.port.ingr_bytes.multicast.cum'
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/43'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/44'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/45'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/46'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/47'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/2'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/3'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/4'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/5'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/6'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/7'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=1884382.0, tags=tags101 + ['port:eth1/49'], hostname=hn101)
aggregator.assert_metric(metric_name, value=98284267.0, tags=tags101 + ['port:eth1/50'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/2'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/3'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/4'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/5'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/6'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/7'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/13'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/14'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/16'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/17'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/18'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/20'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/21'], hostname=hn101)
aggregator.assert_metric(metric_name, | |
from .comment_bridge import CommentBridge
from PIL import Image, ImageDraw, ImageFont
from matplotlib.pyplot import imshow
import numpy as np
import cv2
from typing import List, Dict
import random
import os
import shutil
import random as r
from pydub import AudioSegment
import moviepy.editor as mpe
from enum import IntEnum
import ffmpeg
from collections import Counter
import random
from textwrap import wrap
import spacy
from .polarity_analysis import Analizer
analizer = Analizer()
from .img import AnimImg
from .text import AnimText
from .scene import AnimScene
from .video import AnimVideo
from .constants import lag_frames, fps, lib_path, character_roles_and_gender, hd_video
from PIL import Image, ImageDraw, ImageFont
from . import constants
import re
import subprocess
nlp = spacy.load("xx_ent_wiki_sm")
nlp.add_pipe(nlp.create_pipe('sentencizer'))
def split_str_into_newlines(text: str, max_line_count: int = 34):
words = text.split(" ")
new_text = ""
for word in words:
last_sentence = new_text.split("\n")[-1] + word + " "
if len(last_sentence) >= max_line_count:
new_text += "\n" + word + " "
else:
new_text += word + " "
return new_text
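# Greedy word wrap: words are appended to the current line until adding the
# next word would make that line max_line_count characters or longer, at
# which point a newline is inserted before the word. Small illustrative call
# with made-up input:
#   split_str_into_newlines("a bb ccc dddd", max_line_count=12)
#   -> "a bb ccc \ndddd "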
def audio_duration(filename: str):
duration = float(ffmpeg.probe(filename)['streams'][0]['duration'])
return duration
# @profile
def do_video(config: List[Dict], output_filename):
scenes = []
sound_effects = []
part = 0
frames_since_video_start = 0
audio_start_frame = 0
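# As read by this function, `config` is a list of scene dicts: each scene has
# a "location" (constants.Location value), optionally an "audio" key naming a
# background track, and a "scene" list whose entries may carry "character",
# "emotion", "action" (constants.Action), "text", "name", "evidence",
# "length" and "repeat" keys. This summary is inferred from the lookups below.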
for scene in config:
# We pick up the images to be rendered
bg = AnimImg(constants.location_map[scene["location"]])
arrow = AnimImg(f"{lib_path}/assets/arrow.png", x=881, y=637, w=56, h=56, key_x=19)
textbox = AnimImg(f"{lib_path}/assets/textbox4.png", w=bg.w)
bench = None
# constants.Location needs a more in-depth chose
if scene["location"] == constants.Location.COURTROOM_LEFT:
bench = AnimImg(f"{lib_path}/assets/locations/logo-left.png")
elif scene["location"] == constants.Location.COURTROOM_RIGHT:
bench = AnimImg(f"{lib_path}/assets/locations/logo-right.png")
elif scene["location"] == constants.Location.WITNESS_STAND:
bench = AnimImg(f"{lib_path}/assets/locations/witness_stand.png", w=bg.w)
bench.y = bg.h - bench.h
if "audio" in scene:
audio_start_frame = frames_since_video_start
audio_name = f'{lib_path}/assets/bgm/{scene["audio"]}.mp3'
audio_length = int(audio_duration(audio_name)*fps)
sound_effects.append({"_type": "bg", "length": audio_length, "src": audio_name, "start": audio_start_frame})
current_frame = 0
current_character_name = None
text = None
for obj in scene["scene"]:
# First we check for evidences
if "evidence" in obj and obj['evidence'] is not None:
if scene["location"] == constants.Location.COURTROOM_RIGHT:
ev_bg = AnimImg(f'{lib_path}/assets/evidence-bg.gif', x=97, y=71, w=256, maxh=256)
evidence = AnimImg(obj["evidence"], x=111, y=85, w=232, maxh=232)
evidence = AnimImg(obj["evidence"], x=111, y=int(85+((228-evidence.h)/2)), w=232, h=evidence.h)
else:
ev_bg = AnimImg(f'{lib_path}/assets/evidence-bg.gif', x=544, y=71, w=256, maxh=256)
evidence = AnimImg(obj["evidence"], x=558, y=85, w=232, maxh=232)
evidence = AnimImg(obj["evidence"], x=558, y=int(85+((228-evidence.h)/2)), w=232, h=evidence.h)
else:
ev_bg = None
evidence = None
if "character" in obj:
_dir = f'{lib_path}/assets/characters/Sprites-{obj["character"]}'
current_character_name = obj["character"]
font_size = 30
font_name = ImageFont.truetype(f'{lib_path}/assets/fonts/ace-name.ttf', size=font_size)
temp = f'{lib_path}/assets/locations/defenseempty.png'
img = Image.open(temp)
draw = ImageDraw.Draw(img)
w, h = draw.textsize(current_character_name, font_name)
text_h = (30-h)/2
if w>=367:
font_size = int(30*(367/w))
font_name = ImageFont.truetype(f'{lib_path}/assets/fonts/ace-name.ttf', size=font_size)
w, h = draw.textsize(current_character_name, font_name)
text_h = (30-h)/2
character_name = AnimText(
current_character_name,
font_path=f'{lib_path}/assets/fonts/ace-name.ttf',
font_size=font_size,
x=int(16+(367-w)/2),
y=430+text_h,
)
default = f"neutral/{current_character_name.lower()}-normal" if "emotion" not in obj else obj["emotion"]
default_path = (
f"{_dir}/{default}(a).gif"
)
if not os.path.isfile(default_path):
default_path = (
f"{_dir}/{default}.gif"
)
if not os.path.isfile(
default_path
):
default_path = (
f"{_dir}/neutral/{current_character_name.lower()}-normal(a).gif"
)
assert os.path.isfile(
default_path
), f"{default_path} does not exist"
default_character = AnimImg(default_path, half_speed=True)
if "(a)" in default_path:
talking_character = AnimImg(
default_path.replace("(a)", "(b)"), half_speed=True
)
else:
talking_character = AnimImg(default_path, half_speed=True)
if "emotion" in obj:
default = obj["emotion"]
default_path = (
f"{_dir}/{default}(a).gif"
)
if not os.path.isfile(default_path):
default_path = (
f"{_dir}/{default}.gif"
)
default_character = AnimImg(default_path, half_speed=True)
if "(a)" in default_path:
talking_character = AnimImg(
default_path.replace("(a)", "(b)"), half_speed=True
)
else:
talking_character = AnimImg(default_path, half_speed=True)
if "action" in obj and (
obj["action"] == constants.Action.TEXT
or obj["action"] == constants.Action.TEXT_SHAKE_EFFECT
):
character = talking_character
_text = split_str_into_newlines(obj["text"])
_colour = None if "colour" not in obj else obj["colour"]
text = AnimText(
_text,
font_path=f"{lib_path}/assets/fonts/Igiari.ttf",
font_size=56,
x=19,
y=487,
typewriter_effect=True,
colour=_colour,
)
num_frames = len(_text) + lag_frames
_character_name = character_name
if "name" in obj:
font_size = 30
font_name = ImageFont.truetype(f'{lib_path}/assets/fonts/ace-name.ttf', size=font_size)
temp = f'{lib_path}/assets/locations/defenseempty.png'
img = Image.open(temp)
draw = ImageDraw.Draw(img)
w, h = draw.textsize(obj["name"], font_name)
text_h = (30-h)/2
if w>=427:
font_size = int(30*(427/w))
font_name = ImageFont.truetype(f'{lib_path}/assets/fonts/ace-name.ttf', size=font_size)
w, h = draw.textsize(obj["name"], font_name)
text_h = (30-h)/2
_character_name = AnimText(
obj["name"],
font_path=f"{lib_path}/assets/fonts/ace-name.ttf",
font_size=font_size,
x=int(16+(427-w)/2),
y=430+text_h,
)
if obj["action"] == constants.Action.TEXT_SHAKE_EFFECT:
bg.shake_effect = True
character.shake_effect = True
if bench is not None:
bench.shake_effect = True
textbox.shake_effect = True
scene_objs = list(
filter(
lambda x: x is not None,
[bg, character, bench, textbox, _character_name, text, ev_bg, evidence],
)
)
scenes.append(
AnimScene(scene_objs, len(_text) - 1, start_frame=current_frame)
)
sound_effects.append({"_type": "bip", "length": len(_text) - 1, "gender": character_roles_and_gender[obj["character"]][1]})
if obj["action"] == constants.Action.TEXT_SHAKE_EFFECT:
bg.shake_effect = False
character.shake_effect = False
if bench is not None:
bench.shake_effect = False
textbox.shake_effect = False
text.typewriter_effect = False
character = default_character
scene_objs = list(
filter(
lambda x: x is not None,
[bg, character, bench, textbox, _character_name, text, arrow, ev_bg, evidence],
)
)
scenes.append(
AnimScene(scene_objs, lag_frames, start_frame=len(_text) - 1)
)
current_frame += num_frames
sound_effects.append({"_type": "silence", "length": lag_frames})
elif "action" in obj and obj["action"] == constants.Action.SHAKE_EFFECT:
bg.shake_effect = True
character.shake_effect = True
if bench is not None:
bench.shake_effect = True
textbox.shake_effect = True
character = default_character
if text is not None:
scene_objs = list(
filter(
lambda x: x is not None,
[
bg,
character,
bench,
textbox,
character_name,
text,
arrow,
ev_bg,
evidence,
],
)
)
else:
scene_objs = [bg, character, bench]
scenes.append(
AnimScene(scene_objs, lag_frames, start_frame=current_frame)
)
sound_effects.append({"_type": "shock", "length": lag_frames})
current_frame += lag_frames
bg.shake_effect = False
character.shake_effect = False
if bench is not None:
bench.shake_effect = False
textbox.shake_effect = False
elif "action" in obj and obj["action"] == constants.Action.OBJECTION:
# bg.shake_effect = True
# character.shake_effect = True
# if bench is not None:
# bench.shake_effect = True
objection = AnimImg(f"{lib_path}/assets/objection.gif") if obj["character"] != 'ROU' else AnimImg(f"{lib_path}/assets/notsofast.gif")
objection.shake_effect = True
character = default_character
scene_objs = list(
filter(lambda x: x is not None, [bg, character, bench, objection])
)
scenes.append(AnimScene(scene_objs, 11, start_frame=current_frame))
bg.shake_effect = False
if bench is not None:
bench.shake_effect = False
character.shake_effect = False
scene_objs = list(
filter(lambda x: x is not None, [bg, character, bench])
)
scenes.append(AnimScene(scene_objs, 11, start_frame=current_frame))
sound_effects.append(
{
"_type": "objection",
"character": current_character_name.lower(),
"length": 22,
}
)
current_frame += 11
else:
# list(filter(lambda x: x is not None, scene_objs))
character = default_character
scene_objs = list(
filter(lambda x: x is not None, [bg, character, bench, ev_bg, evidence])
)
_length = lag_frames
if "length" in obj:
_length = obj["length"]
if "repeat" in obj:
character.repeat = obj["repeat"]
scenes.append(AnimScene(scene_objs, _length, start_frame=current_frame))
character.repeat = True
sound_effects.append({"_type": "silence", "length": _length})
current_frame += _length
frames_since_video_start += current_frame
if (len(scenes) > 50):
video = AnimVideo(scenes, fps=fps)
video.render(output_filename + '/' +str(part) + '.mp4')
part+=1
scenes = []
if (len(scenes) > 0):
video = AnimVideo(scenes, fps=fps)
video.render(output_filename + '/' +str(part) + '.mp4')
return [sound_effects, frames_since_video_start]
def do_audio(sound_effects: List[Dict], output_filename, video_end_frame):
audio_se = AudioSegment.empty()
music_se = AudioSegment.empty()
blink = AudioSegment.from_wav(f"{lib_path}/assets/sfx general/sfx-blink.wav")
blink -= 10
badum = AudioSegment.from_wav(f"{lib_path}/assets/sfx general/sfx-fwashing.wav")
spf = 1 / fps * 1000
default_objection = AudioSegment.from_wav(f"{lib_path}/assets/sfx general/sfx-objection.wav")
bgms = [x for i, x in enumerate(sound_effects) if x["_type"] == "bg"]
cap = video_end_frame
start = 0
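# Background music assembly: the first track plays once for its recorded
# length, then its "-loop" variant (assumed to sit next to the original mp3)
# is repeated until the cap frame; if a second "bg" entry exists, the first
# track is capped at that entry's start frame and the same
# play-once-then-loop scheme is applied to the second track up to the end of
# the video.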
if len(bgms)>1:
cap = bgms[1]["start"]
l = cap
if l>bgms[0]["length"]:
music_se += AudioSegment.from_mp3(bgms[0]["src"])[:int((bgms[0]["length"]/fps)*1000)]
start = bgms[0]["length"]
l = cap-start
bgms[0]["src"] = f'{bgms[0]["src"][:-4]}-loop.mp3'
bgms[0]["length"] = int(audio_duration(bgms[0]["src"])*fps)
while l>bgms[0]["length"]:
music_se += AudioSegment.from_mp3(bgms[0]["src"])[:int((bgms[0]["length"]/fps)*1000)]
l -= bgms[0]["length"]
if l>0:
music_se += AudioSegment.from_mp3(bgms[0]["src"])[:int((l/fps)*1000)]
else:
music_se+=AudioSegment.from_mp3(bgms[0]["src"])[:int((l/fps)*1000)]
start = bgms[1]["start"]
cap = video_end_frame
l = cap - start
if l>bgms[1]["length"]:
music_se += AudioSegment.from_mp3(bgms[1]["src"])[:int((bgms[1]["length"]/fps)*1000)]
start+=bgms[1]["length"]
bgms[1]["src"] = f'{bgms[1]["src"][:-4]}-loop.mp3'
bgms[1]["length"] = int(audio_duration(bgms[1]["src"])*fps)
while l>bgms[1]["length"]:
music_se += AudioSegment.from_mp3(bgms[1]["src"])[:int((bgms[1]["length"]/fps)*1000)]
l -= bgms[1]["length"]
if l>0:
music_se += AudioSegment.from_mp3(bgms[1]["src"])[:int((l/fps)*1000)]
else:
music_se += AudioSegment.from_mp3(bgms[1]["src"])[:int((l/fps)*1000)]
else:
l = cap
if l>bgms[0]["length"]:
music_se += AudioSegment.from_mp3(bgms[0]["src"])[:int((bgms[0]["length"]/fps)*1000)]
start = bgms[0]["length"]
l = cap-start
bgms[0]["src"] = f'{bgms[0]["src"][:-4]}-loop.mp3'
bgms[0]["length"] = int(audio_duration(bgms[0]["src"])*fps)
while l>bgms[0]["length"]:
music_se += AudioSegment.from_mp3(bgms[0]["src"])[:int((bgms[0]["length"]/fps)*1000)]
l -= bgms[0]["length"]
if l>0:
music_se += AudioSegment.from_mp3(bgms[0]["src"])[:int((l/fps)*1000)]
else:
music_se+=AudioSegment.from_mp3(bgms[0]["src"])[:int((l/fps)*1000)]
for obj in sound_effects:
if obj["_type"] == "silence":
audio_se += AudioSegment.silent(duration=int(obj["length"] * spf))
elif obj["_type"] == "bip":
bip = AudioSegment.from_wav(
f"{lib_path}/assets/sfx general/sfx-blip{obj['gender']}.wav"
) + AudioSegment.silent(duration=50)
long_bip = bip * 100
long_bip -= 10
audio_se += blink + long_bip[: max(int(obj["length"] * spf - len(blink)), 0)]
elif obj["_type"] == "objection":
if character_roles_and_gender[obj["character"].upper()][0] in ['attorney', 'prosecutor']:
objection = AudioSegment.from_mp3(f'{lib_path}/assets/objections/objection ({obj["character"]}).mp3')
audio_se += objection[: int(obj["length"] * spf)]
else:
audio_se += default_objection[: int(obj["length"] * spf)]
elif obj["_type"] == "shock":
audio_se += badum[: int(obj["length"] * spf)]
final_se = music_se.overlay(audio_se)
final_se.export(output_filename, format="mp3")
def ace_attorney_anim(config: List[Dict], output_filename: str = "output.mp4"):
root_filename = output_filename[:-4]
audio_filename = output_filename + '.audio.mp3'
text_filename = root_filename + '.txt'
if os.path.exists(root_filename):
shutil.rmtree(root_filename)
os.mkdir(root_filename)
sound_effects = do_video(config, root_filename)
do_audio(sound_effects[0], audio_filename, sound_effects[1])
videos = []
with open(text_filename, 'w') as
mock.Mock()
mock_api.endpoint_url = None
self.deployment_inst = api._Deployment(mock_api)
self.deployment_config = copy.deepcopy(FAKE_DEPLOYMENT_CONFIG)
self.deployment_uuid = "599bdf1d-fe77-461a-a810-d59b1490f4e3"
creds = copy.deepcopy(FAKE_DEPLOYMENT_CONFIG)["creds"]
admin_credential = creds["openstack"]
admin_credential["endpoint"] = None
admin_credential.update(admin_credential.pop("admin"))
admin_credential["permission"] = consts.EndpointPermission.ADMIN
admin_credential["https_insecure"] = False
admin_credential["https_cacert"] = None
self.credentials = {"admin": admin_credential, "users": []}
self.deployment = {
"uuid": self.deployment_uuid,
"name": "fake_name",
"config": self.deployment_config,
"credentials": {"openstack": [self.credentials]}
}
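# The fixture mirrors a stored "existing cloud" deployment record: the admin
# credential is derived from FAKE_DEPLOYMENT_CONFIG (defined earlier in this
# test module, not shown here) by flattening the nested "admin" dict and
# adding the endpoint/permission/https fields the engine would normally set.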
class DeploymentAPITestCase(BaseDeploymentTestCase):
@mock.patch("rally.common.objects.deploy.db.deployment_update")
@mock.patch("rally.common.objects.deploy.db.deployment_create")
@mock.patch("rally.deployment.engines.existing.ExistingCloud.validate")
def test_create(self, mock_existing_cloud_validate,
mock_deployment_create, mock_deployment_update):
mock_deployment_create.return_value = self.deployment
mock_deployment_update.return_value = self.deployment
dep = self.deployment_inst.create(config=self.deployment_config,
name="fake_deployment")
self.assertIsInstance(dep, dict)
mock_deployment_create.assert_called_once_with({
"name": "fake_deployment",
"config": self.deployment_config,
})
mock_existing_cloud_validate.assert_called_once_with()
mock_deployment_update.assert_has_calls([
mock.call(self.deployment_uuid,
{"credentials": {"openstack": [self.credentials]}})
])
@mock.patch("rally.common.objects.deploy.db.deployment_update")
@mock.patch("rally.common.objects.deploy.db.deployment_create")
@mock.patch("rally.deployment.engines.existing.ExistingCloud.validate",
side_effect=jsonschema.ValidationError("ValidationError"))
def test_create_validation_error(
self, mock_existing_cloud_validate, mock_deployment_create,
mock_deployment_update):
mock_deployment_create.return_value = self.deployment
self.assertRaises(jsonschema.ValidationError,
self.deployment_inst.create,
config=self.deployment_config,
name="fake_deployment")
mock_deployment_update.assert_called_once_with(
self.deployment_uuid,
{"status": consts.DeployStatus.DEPLOY_FAILED})
@mock.patch("rally.api.LOG")
@mock.patch("rally.common.objects.deploy.db.deployment_create",
side_effect=exceptions.DeploymentNameExists(
deployment="fake_deploy"))
def test_create_duplication_error(self, mock_deployment_create, mock_log):
self.assertRaises(exceptions.DeploymentNameExists,
self.deployment_inst.create,
config=self.deployment_config,
name="fake_deployment")
@mock.patch("rally.common.objects.deploy.db.deployment_delete")
@mock.patch("rally.common.objects.deploy.db.deployment_update")
@mock.patch("rally.common.objects.deploy.db.deployment_get")
def test_destroy(self, mock_deployment_get, mock_deployment_update,
mock_deployment_delete):
mock_deployment_get.return_value = self.deployment
mock_deployment_update.return_value = self.deployment
list_verifiers = [{"name": "f1", "uuid": "1"},
{"name": "f2", "uuid": "2"}]
self.deployment_inst.api.verifier.list.return_value = list_verifiers
self.deployment_inst.destroy(deployment=self.deployment_uuid)
mock_deployment_get.assert_called_once_with(self.deployment_uuid)
mock_deployment_delete.assert_called_once_with(self.deployment_uuid)
self.deployment_inst.api.verifier.list.assert_called_once_with()
self.assertEqual(
[mock.call(verifier_id=m["name"],
deployment_id=self.deployment["name"],
force=True)
for m in list_verifiers],
self.deployment_inst.api.verifier.delete.call_args_list)
@mock.patch("rally.common.objects.deploy.db.deployment_update")
@mock.patch("rally.common.objects.deploy.db.deployment_get")
def test_recreate(self, mock_deployment_get, mock_deployment_update):
mock_deployment_get.return_value = self.deployment
mock_deployment_update.return_value = self.deployment
self.deployment_inst.recreate(deployment=self.deployment_uuid)
mock_deployment_get.assert_called_once_with(self.deployment_uuid)
mock_deployment_update.assert_has_calls([
mock.call(
self.deployment_uuid,
{"credentials":
{"openstack": [{"admin": self.credentials["admin"],
"users": self.credentials["users"]}]}})
])
@mock.patch("rally.common.objects.deploy.db.deployment_update")
@mock.patch("rally.common.objects.deploy.db.deployment_get")
def test_recreate_config(self, mock_deployment_get,
mock_deployment_update):
mock_deployment_get.return_value = self.deployment
mock_deployment_update.return_value = self.deployment
config = copy.deepcopy(self.deployment_config)
config["creds"]["openstack"]["admin"] = {
"username": "admin",
"password": "<PASSWORD>",
"tenant_name": "demo"}
config["creds"]["openstack"]["users"] = [
{"username": "user1",
"password": "<PASSWORD>",
"tenant_name": "demo"}]
self.deployment_inst.recreate(deployment=self.deployment_uuid,
config=config)
mock_deployment_get.assert_called_once_with(self.deployment_uuid)
mock_deployment_update.assert_has_calls([
mock.call(self.deployment_uuid, {"config": config}),
])
@mock.patch("rally.common.objects.deploy.db.deployment_update")
@mock.patch("rally.common.objects.deploy.db.deployment_get")
def test_recreate_old_config(self, mock_deployment_get,
mock_deployment_update):
mock_deployment_get.return_value = self.deployment
mock_deployment_update.return_value = self.deployment
config = copy.deepcopy(self.deployment_config["creds"])
config["openstack"]["admin"] = {
"username": "admin",
"password": "<PASSWORD>",
"tenant_name": "demo"}
config["openstack"]["users"] = [
{"username": "user1",
"password": "<PASSWORD>",
"tenant_name": "demo"}]
self.deployment_inst.recreate(deployment=self.deployment_uuid,
config=config)
mock_deployment_get.assert_called_once_with(self.deployment_uuid)
mock_deployment_update.assert_has_calls([
mock.call(self.deployment_uuid,
{"config": {"type": "ExistingCloud", "creds": config}}),
])
@mock.patch("rally.common.objects.deploy.db.deployment_update")
@mock.patch("rally.common.objects.deploy.db.deployment_get")
def test_recreate_config_invalid(self, mock_deployment_get,
mock_deployment_update):
mock_deployment_get.return_value = self.deployment
mock_deployment_update.return_value = self.deployment
config = copy.deepcopy(self.deployment_config)
config["admin"] = {"foo": "bar"}
self.assertRaises(jsonschema.ValidationError,
self.deployment_inst.recreate,
deployment=self.deployment_uuid,
config=config)
@mock.patch("rally.common.objects.deploy.db.deployment_update")
@mock.patch("rally.common.objects.deploy.db.deployment_get")
def test_recreate_config_wrong_type(self, mock_deployment_get,
mock_deployment_update):
mock_deployment_get.return_value = self.deployment
mock_deployment_update.return_value = self.deployment
config = copy.deepcopy(self.deployment_config)
config["type"] = "foo"
self.assertRaises(exceptions.RallyException,
self.deployment_inst.recreate,
deployment=self.deployment_uuid,
config=config)
@mock.patch("rally.common.objects.deploy.db.deployment_get")
def test_get(self, mock_deployment_get):
origin_config = copy.deepcopy(self.deployment_config)
deployment_id = "aaaa-bbbb-cccc-dddd"
mock_deployment_get.return_value = self.deployment
ret = self.deployment_inst.get(deployment=deployment_id)
for key in self.deployment:
self.assertIn(key, ret)
if key != "config":
self.assertEqual(self.deployment[key], ret[key])
self.assertEqual(origin_config["creds"], ret["config"])
@mock.patch("rally.common.objects.deploy.db.deployment_get")
def test_get_deprecated_formats(self, mock_deployment_get):
origin_config = copy.deepcopy(self.deployment_config)
self.deployment_config.update(
**self.deployment_config.pop("creds")["openstack"])
deployment_id = "aaaa-bbbb-cccc-dddd"
mock_deployment_get.return_value = self.deployment
ret = self.deployment_inst.get(deployment=deployment_id)
for key in self.deployment:
self.assertIn(key, ret)
if key != "config":
self.assertEqual(self.deployment[key], ret[key])
origin_config.pop("type")
self.assertEqual(origin_config["creds"], ret["config"])
@mock.patch("rally.common.objects.Deployment.list")
def test_list(self, mock_deployment_list):
mock_deployment = mock.Mock()
mock_deployment.to_dict.return_value = self.deployment
mock_deployment_list.return_value = [mock_deployment]
ret = self.deployment_inst.list()
for key in self.deployment:
self.assertEqual(ret[0][key], self.deployment[key])
@mock.patch("rally.common.objects.Deployment.get")
def test_deployment_check(self, mock_deployment_get):
fake_credential1 = fakes.fake_credential()
fake_credential2 = fakes.fake_credential()
mock_deployment_get.return_value.get_all_credentials.return_value = {
"openstack": [{"admin": fake_credential1,
"users": [fake_credential2]}]}
self.assertEqual(
{"openstack": [
{"services": fake_credential1.list_services.return_value}]},
self.deployment_inst.check(deployment="uuid"))
fake_credential1.verify_connection.assert_called_once_with()
fake_credential2.verify_connection.assert_called_once_with()
@mock.patch("rally.common.objects.Deployment.get")
def test_deployment_check_list_services_via_admin(self,
mock_deployment_get):
fake_credential1 = fakes.fake_credential()
fake_credential2 = fakes.fake_credential()
mock_deployment_get.return_value.get_all_credentials.return_value = {
"openstack": [{"admin": fake_credential1,
"users": [fake_credential2]}]}
self.assertEqual(
{"openstack": [
{"services": fake_credential1.list_services.return_value}]},
self.deployment_inst.check(deployment="uuid"))
fake_credential1.verify_connection.assert_called_once_with()
fake_credential1.list_services.assert_called_once_with()
fake_credential2.verify_connection.assert_called_once_with()
self.assertFalse(fake_credential2.list_services.called)
@mock.patch("rally.common.objects.Deployment.get")
def test_deployment_check_list_services_via_user(self,
mock_deployment_get):
fake_credential1 = fakes.fake_credential()
fake_credential2 = fakes.fake_credential()
mock_deployment_get.return_value.get_all_credentials.return_value = {
"openstack": [{"admin": None,
"users": [fake_credential2, fake_credential1]}]}
self.assertEqual(
{"openstack": [
{"services": fake_credential2.list_services.return_value}]},
self.deployment_inst.check(deployment="uuid"))
fake_credential2.verify_connection.assert_called_once_with()
fake_credential2.list_services.assert_called_once_with()
fake_credential1.verify_connection.assert_called_once_with()
self.assertFalse(fake_credential1.list_services.called)
@mock.patch("rally.api.traceback")
@mock.patch("rally.common.objects.Deployment.get")
def test_deployment_check_fails(self, mock_deployment_get, mock_traceback):
mock_traceback.format_exc.side_effect = ("Trace1", "Trace2")
fake_credential1 = fakes.fake_credential()
fake_credential2 = fakes.fake_credential()
fake_credential1.verify_connection.side_effect = KeyError("oops")
fake_credential2.verify_connection.side_effect = TypeError("ooooops")
mock_deployment_get.return_value.get_all_credentials.return_value = {
"openstack": [{"admin": fake_credential1,
"users": [fake_credential2]}]}
self.assertEqual(
{"openstack": [
{"services": [],
"admin_error": {
"etype": "KeyError", "msg": "'oops'",
"trace": "Trace1"},
"user_error": {
"etype": "TypeError", "msg": "ooooops",
"trace": "Trace2"}}]},
self.deployment_inst.check(deployment="uuid"))
fake_credential1.verify_connection.assert_called_once_with()
fake_credential2.verify_connection.assert_called_once_with()
self.assertFalse(fake_credential1.list_services.called)
self.assertFalse(fake_credential2.list_services.called)
def test_service_list(self):
fake_credential = fakes.fake_credential()
deployment = mock.Mock(spec=objects.Deployment)
deployment.get_credentials_for.return_value = {
"admin": fake_credential, "users": []}
result = self.deployment_inst.service_list(deployment=deployment)
self.assertEqual(fake_credential.list_services.return_value, result)
class APITestCase(test.TestCase):
@mock.patch("os.path.isfile", return_value=False)
@mock.patch("rally.common.version.database_revision",
return_value={"revision": "foobar", "current_head": "foobar"})
@mock.patch("rally.common.version.version_string", return_value="0.0.0")
@mock.patch("rally.api.CONF", spec=cfg.CONF)
def test_init_config_args(self, mock_conf, mock_version_string,
mock_database_revision, mock_isfile):
api_ = api.API(config_args=["foo", "bar", "baz"])
mock_conf.assert_called_once_with(
["foo", "bar", "baz"], default_config_files=None,
project="rally", version="0.0.0")
self.assertIsInstance(api_._deployment, api._Deployment)
self.assertIsInstance(api_._task, api._Task)
@mock.patch("os.path.isfile", return_value=False)
@mock.patch("rally.common.version.database_revision",
return_value={"revision": "foobar", "current_head": "foobar"})
@mock.patch("rally.common.version.version_string", return_value="0.0.0")
@mock.patch("rally.api.CONF", spec=cfg.CONF)
def test_init_config_file(self, mock_conf, mock_version_string,
mock_database_revision, mock_isfile):
api_ = api.API(config_file="myfile.conf")
mock_conf.assert_called_once_with(
[], default_config_files=["myfile.conf"],
project="rally", version="0.0.0")
self.assertIsInstance(api_._deployment, api._Deployment)
self.assertIsInstance(api_._task, api._Task)
@mock.patch("os.path.isfile", return_value=False)
@mock.patch("rally.common.version.database_revision",
return_value={"revision": "foobar", "current_head": "foobar"})
@mock.patch("rally.common.version.version_string", return_value="0.0.0")
@mock.patch("rally.api.CONF", spec=cfg.CONF)
def test_init_no_default_config_file(self, mock_conf, mock_version_string,
mock_database_revision, mock_isfile):
api.API(skip_db_check=True)
mock_conf.assert_called_once_with(
[], default_config_files=None, project="rally", version="0.0.0")
@mock.patch("os.path.isfile")
@mock.patch("rally.common.version.database_revision",
return_value={"revision": "foobar", "current_head": "foobar"})
@mock.patch("rally.common.version.version_string", return_value="0.0.0")
@mock.patch("rally.api.CONF", spec=cfg.CONF)
def test_init_default_config_file(self, mock_conf, mock_version_string,
mock_database_revision, mock_isfile):
mock_isfile.side_effect = lambda f: f == "/etc/rally/rally.conf"
api.API(skip_db_check=True)
mock_conf.assert_called_once_with(
[], default_config_files=["/etc/rally/rally.conf"],
project="rally", version="0.0.0")
@mock.patch("os.path.isfile", return_value=False)
@mock.patch("rally.common.version.version_string", return_value="0.0.0")
@mock.patch("rally.api.CONF", spec=cfg.CONF)
def test_init_exception(self, mock_conf, mock_version_string, mock_isfile):
mock_conf.side_effect = cfg.ConfigFilesNotFoundError(["file1",
"file2"])
self.assertRaises(exceptions.RallyException, api.API)
mock_conf.assert_called_once_with(
[], default_config_files=None, project="rally", version="0.0.0")
@mock.patch("os.path.isfile", return_value=False)
@mock.patch("rally.common.plugin.discover.load_plugins")
@mock.patch("rally.common.version.database_revision",
return_value={"revision": "foobar", "current_head": "foobar"})
@mock.patch("rally.common.version.version_string", return_value="0.0.0")
@mock.patch("rally.api.CONF", spec=cfg.CONF)
def test_init_plugin_path(self, mock_conf, mock_version_string,
mock_database_revision, mock_load_plugins,
mock_isfile):
mock_conf.__contains__.return_value = True
mock_conf.get.side_effect = (
lambda a: ["/path/from/args"] if a == "plugin_paths" else None)
api.API(plugin_paths=["/my/path"])
mock_conf.assert_called_once_with([], default_config_files=None,
project="rally", version="0.0.0")
mock_load_plugins.assert_has_calls([
mock.call("/my/path"),
mock.call("/path/from/args"),
])
@mock.patch("os.path.isfile", return_value=False)
@mock.patch("rally.common.version.database_revision",
return_value={"revision": "spam", "current_head": "foobar"})
@mock.patch("rally.common.version.version_string", return_value="0.0.0")
@mock.patch("rally.api.CONF", spec=cfg.CONF)
def test_init_check_revision_exception(self, mock_conf,
mock_version_string,
mock_database_revision,
mock_isfile):
exc = self.assertRaises(exceptions.RallyException, api.API)
self.assertIn("rally db upgrade", str(exc))
mock_conf.assert_called_once_with(
[], default_config_files=None, project="rally", version="0.0.0")
@mock.patch("os.path.isfile", return_value=False)
@mock.patch("rally.common.version.database_revision",
return_value={"revision": None, "current_head": "foobar"})
@mock.patch("rally.common.version.version_string", return_value="0.0.0")
@mock.patch("rally.api.CONF", spec=cfg.CONF)
def test_init_check_revision_exception_no_db(self, mock_conf,
mock_version_string,
mock_database_revision,
mock_isfile):
exc = self.assertRaises(exceptions.RallyException, api.API)
self.assertIn("rally db create", str(exc))
mock_conf.assert_called_once_with(
[], default_config_files=None, project="rally", version="0.0.0")
def test_version(self):
api_inst = api.API(skip_db_check=True)
self.assertEqual(1, api_inst.version)
@mock.patch("requests.request")
def test__request(self, mock_request):
api_inst = api.API(skip_db_check=True)
method = "test"
path = "path"
response = mock_request.return_value
response.status_code = 200
response.json.return_value = {"result": "test"}
self.assertEqual("test", api_inst._request(path=path, method=method))
@mock.patch("requests.request")
@mock.patch("rally.exceptions.find_exception")
def test__request_fail(self, mock_find_exception, mock_request):
api_inst = api.API(skip_db_check=True)
method = "test"
path = "path"
response = mock_request.return_value
mock_find_exception.return_value = exceptions.RallyException()
response.status_code = 201
response.json.return_value = {"result": "test"}
self.assertRaises(exceptions.RallyException,
api_inst._request, path=path, method=method)
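    # The two tests above pin the transport contract of api.API._request: an
    # HTTP 200 response yields the "result" field of the JSON body, while any
    # other status code is turned into a Rally exception via
    # exceptions.find_exception.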
class FakeVerifierManager(object):
NAME = "fake_verifier"
PLATFORM = "tests"
TITLE = "Fake verifier which is used only for testing purpose"
@classmethod
def get_name(cls):
return cls.NAME
@classmethod
def get_platform(cls):
return cls.PLATFORM
@classmethod
def get_info(cls):
return {"title": cls.TITLE}
class VerifierAPITestCase(test.TestCase):
def setUp(self):
super(VerifierAPITestCase, self).setUp()
mock_api = mock.Mock()
mock_api.endpoint_url = None
self.verifier_inst = api._Verifier(mock_api)
@mock.patch("rally.api.vmanager.VerifierManager.get_all")
def test_list_plugins(self, mock_verifier_manager_get_all):
platform = "some"
mock_verifier_manager_get_all.return_value = [FakeVerifierManager]
self.assertEqual(
[{"name": FakeVerifierManager.NAME,
"platform": FakeVerifierManager.PLATFORM,
"description": FakeVerifierManager.TITLE,
"location": "%s.%s" % (FakeVerifierManager.__module__,
FakeVerifierManager.__name__)}],
self.verifier_inst.list_plugins(platform=platform))
mock_verifier_manager_get_all.assert_called_once_with(
platform=platform)
@mock.patch("rally.api.objects.Verifier.get")
def test_get(self, mock_verifier_get):
uuid = "some"
self.assertEqual(mock_verifier_get.return_value.to_dict(),
self.verifier_inst.get(verifier_id=uuid))
mock_verifier_get.assert_called_once_with(uuid)
@mock.patch("rally.api.objects.Verifier.list")
def test_list(self, mock_verifier_list):
status = "some_special_status"
mock_verifier_list.return_value = [mock.Mock()]
self.assertEqual(
[i.to_dict() for i in mock_verifier_list.return_value],
self.verifier_inst.list(status=status))
mock_verifier_list.assert_called_once_with(status)
@mock.patch("rally.api.objects.Verifier.create")
@mock.patch("rally.api._Verifier._get")
@mock.patch("rally.api.vmanager.VerifierManager.get")
def test_create(self, mock_verifier_manager_get, mock___verifier__get,
mock_verifier_create):
mock___verifier__get.side_effect = exceptions.ResourceNotFound(id="1")
name = "SomeVerifier"
vtype = "fake_verifier"
platform = "tests"
source = "https://example.com"
version = "3.1415"
system_wide = True
extra_settings = {"verifier_specific_option": "value_for_it"}
verifier_obj = mock_verifier_create.return_value
verifier_obj.manager.get_platform.return_value = platform
verifier_obj.manager._meta_get.side_effect = [source]
verifier_uuid = self.verifier_inst.create(
name=name, vtype=vtype, version=version,
system_wide=system_wide, extra_settings=extra_settings)
mock_verifier_manager_get.assert_called_once_with(vtype,
platform=None)
mock___verifier__get.assert_called_once_with(name)
mock_verifier_create.assert_called_once_with(
name=name, source=None, system_wide=system_wide, version=version,
vtype=vtype, platform=None, extra_settings=extra_settings)
self.assertEqual(verifier_obj.uuid, verifier_uuid)
verifier_obj.update_properties.assert_called_once_with(
platform=platform, source=source)
self.assertEqual([mock.call(consts.VerifierStatus.INSTALLING),
mock.call(consts.VerifierStatus.INSTALLED)],
verifier_obj.update_status.call_args_list)
verifier_obj.manager.install.assert_called_once_with()
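    # Together with test_create_fails_on_install_step below, this pins the
    # verifier status lifecycle: INSTALLING while the manager installs, then
    # INSTALLED on success or FAILED if manager.install() raises.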
@mock.patch("rally.api.objects.Verifier.create")
@mock.patch("rally.api._Verifier._get")
@mock.patch("rally.api.vmanager.VerifierManager.get")
def test_create_fails_on_existing_verifier(
self, mock_verifier_manager_get, mock___verifier__get,
mock_verifier_create):
name = "SomeVerifier"
vtype = "fake_verifier"
platform = "tests"
source = "https://example.com"
version = "3.1415"
system_wide = True
extra_settings = {"verifier_specific_option": "value_for_it"}
self.assertRaises(exceptions.RallyException,
self.verifier_inst.create,
name=name, vtype=vtype, platform=platform,
source=source, version=version,
system_wide=system_wide,
extra_settings=extra_settings)
mock_verifier_manager_get.assert_called_once_with(vtype,
platform=platform)
mock___verifier__get.assert_called_once_with(name)
self.assertFalse(mock_verifier_create.called)
@mock.patch("rally.api.objects.Verifier.create")
@mock.patch("rally.api._Verifier._get")
@mock.patch("rally.api.vmanager.VerifierManager.get")
def test_create_fails_on_install_step(
self, mock_verifier_manager_get, mock___verifier__get,
mock_verifier_create):
mock___verifier__get.side_effect = exceptions.ResourceNotFound(id="1")
verifier_obj = mock_verifier_create.return_value
verifier_obj.manager.install.side_effect = RuntimeError
name = "SomeVerifier"
vtype = "fake_verifier"
platform = "tests"
source = "https://example.com"
version = "3.1415"
system_wide = True
extra_settings = {"verifier_specific_option": "value_for_it"}
self.assertRaises(RuntimeError,
self.verifier_inst.create,
name=name, vtype=vtype, platform=platform,
source=source, version=version,
system_wide=system_wide,
extra_settings=extra_settings)
mock_verifier_manager_get.assert_called_once_with(
vtype, platform=platform)
mock___verifier__get.assert_called_once_with(name)
mock_verifier_create.assert_called_once_with(
name=name, source=source, system_wide=system_wide, version=version,
vtype=vtype, platform=platform, extra_settings=extra_settings)
self.assertEqual([mock.call(consts.VerifierStatus.INSTALLING),
mock.call(consts.VerifierStatus.FAILED)],
verifier_obj.update_status.call_args_list)
verifier_obj.manager.install.assert_called_once_with()
@mock.patch("rally.api.objects.Verifier.delete")
@mock.patch("rally.common.objects.Verifier.get")
def test_delete_no_verifications(self, mock_verifier_get,
mock_verifier_delete):
        self.verifier_inst.api.verification.list.return_value = []
verifier_obj = mock_verifier_get.return_value
verifier_id = "uuuiiiddd"
deployment_id = "deployment"
# remove just deployment specific data
self.verifier_inst.delete(verifier_id=verifier_id,
deployment_id=deployment_id)
self.assertFalse(mock_verifier_delete.called)
self.verifier_inst.api.verification.list.assert_called_once_with(
verifier_id=verifier_id, deployment_id=deployment_id)
verifier_obj.set_deployment.assert_called_once_with(deployment_id)
verifier_obj.manager.uninstall.assert_called_once_with()
verifier_obj.set_deployment.reset_mock()
verifier_obj.manager.uninstall.reset_mock()
self.verifier_inst.api.verification.list.reset_mock()
# remove the whole verifier
self.verifier_inst.delete(verifier_id=verifier_id)
self.verifier_inst.api.verification.list.assert_called_once_with(
verifier_id=verifier_id, deployment_id=None)
self.assertFalse(verifier_obj.set_deployment.called)
verifier_obj.manager.uninstall.assert_called_once_with(full=True)
mock_verifier_delete.assert_called_once_with(verifier_id)
@mock.patch("rally.common.objects.Verifier.get")
@mock.patch("rally.api.objects.Verifier.delete")
def test_delete_with_verifications(self,
mock_verifier_delete,
mock_verifier_get):
verifications = [{"uuid": "uuid_1"}, {"uuid": "uuid_2"}]
verifier_id = "uuuiiiddd"
self.assertRaises(exceptions.RallyException,
self.verifier_inst.delete,
verifier_id=verifier_id)
self.verifier_inst.api.verification.list.assert_called_once_with(
verifier_id=verifier_id, deployment_id=None)
self.assertFalse(self.verifier_inst.api.verification.delete.called)
self.verifier_inst.api.reset_mock()
self.verifier_inst.api.verification.list.return_value = verifications
self.verifier_inst.delete(verifier_id=verifier_id, force=True)
self.verifier_inst.api.verification.list.assert_called_once_with(
verifier_id=verifier_id, deployment_id=None)
self.assertEqual(
[mock.call(verification_uuid=v["uuid"]) for v in verifications],
self.verifier_inst.api.verification.delete.call_args_list)
@mock.patch("rally.api.utils.BackupHelper")
@mock.patch("rally.api._Verifier._get")
def test_update_failed(self, mock___verifier__get, mock_backup_helper):
verifier_obj = mock___verifier__get.return_value
verifier_obj.system_wide = False
uuid = "uuuuiiiidddd"
e = self.assertRaises(exceptions.RallyException,
self.verifier_inst.update,
verifier_id=uuid)
self.assertIn("At least one of the following parameters should be",
"%s" % e)
for status in consts.VerifierStatus:
if status != consts.VerifierStatus.INSTALLED:
verifier_obj.status = status
e = self.assertRaises(exceptions.RallyException,
self.verifier_inst.update,
verifier_id=uuid, system_wide=True)
self.assertIn("because verifier is in | |
import termii_switch
import termii_token
import termii_insight
class Client:
"""
Creates a termi client using the api_key
...
Attributes:
api_key: str
The termii developer API Key to create a client from.
Methods:
        fetch_sender_ids: A method to fetch the sender IDs associated with a termii client.
request_sender_id: A method to request new termii sender ID.
send_message: A method to send a message using the termii API.
send_bulk_sms: A method to send bulk sms messages using the termii API.
send_message_with_autogenerated_number: A method to send messages to customers using Termii's auto-generated messaging numbers that adapt to customers location.
send_device_template: A method to set a device template for the one-time-passwords (pins) sent to their customers via whatsapp or sms.
        fetch_phonebooks: A method to get all the phonebooks associated with a termii client
        create_phonebook: A method to create a phonebook using the termii API
        update_phonebook: A method to update a phonebook using the termii API
        delete_phonebook: A method to delete a phonebook using the termii API
        fetch_contacts: A method to get all the contacts associated with a termii phonebook
add_new_contact: A method to add a single contact to a phonebook using the termii API
add_contacts: A method to add contacts to a phonebook using the termii API
delete_contact: A method to delete contacts from a phonebook using the termii API
send_campaign: A method to send campaigns using the termii API
        fetch_campaigns: A method to get all the campaigns associated with a client
fetch_campaign_history: A method to get the history of a certain campaign
get_balance: A method to check a client's termii balance
search_number: A method to verify phone numbers and automatically detect their status
        search_number_status: A method to detect if a number is fake or has been ported to a new network.
fetch_history: A method that returns reports for messages sent across the sms, voice & whatsapp channels.
        send_token: A method that allows businesses to trigger one-time-passwords (OTP) across any available messaging channel on Termii.
voice_token: A method that enables you to generate and trigger one-time-passwords via a voice channel to a phone number.
voice_call: A method that enables you to send messages from your application through a voice channel to a client's phone number.
verify_token: A method that checks tokens sent to customers and returns a response confirming the status of the token.
        in_app_token: A method that returns an OTP code in JSON format which can be used in any web or mobile app.
"""
def __init__(self, api_key):
self.api_key = api_key
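    # Minimal usage sketch (illustrative only; "YOUR_API_KEY" and the phone
    # number below are placeholders, not real values):
    #
    #   client = Client("YOUR_API_KEY")
    #   balance = client.get_balance()
    #   response = client.send_message(
    #       number_to="234XXXXXXXXXX", sender_id="MySenderID",
    #       message="Hello from Termii", message_type="plain",
    #       channel="generic", media_dict={})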
""" START OF METHODS FOR SWITCH"""
def fetch_sender_ids(self):
"""
        A method to fetch the sender IDs associated with a termii client.
"""
response = termii_switch.get_sender_ids(self.api_key)
return response
def request_sender_id(self, sender_id, usecase, company):
"""
A method to request new termii sender ID.
Params:
sender_id: str
The name of the new sender_id to create
usecase: str
The usecase of the new sender_id. Must be at least 20 characters
company: str
The name of the company associated with this sender_id
"""
response = termii_switch.request_new_sender_id(self.api_key, sender_id, usecase, company)
return response
def send_message(self, number_to, sender_id, message, message_type, channel, media_dict):
"""
A method to send a message using the termii API.
Params:
number_to: str
The phone number the message should be sent to in international format. '+' should be excluded
sender_id: str
The sender id this message should be sent from and identify with
message: str
The message to be sent.
message_type: str
The type of message to be sent. Should be 'plain'
channel: str
The channel this message should be sent with. Can be 'dnd', 'whatsapp' or 'generic'
media_dict: dict
A dictionary containing the options for media if applicable. Should contain 'url' and 'caption' keys. Pass an empty dictionary if not applicable
"""
response = termii_switch.post_message(self.api_key, number_to, sender_id, message, message_type, channel, media_dict)
return response
def send_bulk_sms(self, numbers_to, sender_id, message, message_type, channel):
"""
A method to send bulk sms messages using the termii API.
Params:
            numbers_to: list
An array containing the phone numbers the message should be sent to in international format. '+' should be excluded
sender_id: str
The sender id this message should be sent from and identify with
message: str
The message to be sent.
message_type: str
The type of message to be sent. Should be 'plain'
channel: str
The channel this message should be sent with. Can be 'dnd', 'whatsapp' or 'generic'
"""
response = termii_switch.post_message_bulk(self.api_key, numbers_to, sender_id, message, message_type, channel)
return response
def send_message_with_autogenerated_number(self, number_to, message):
"""
A method to send messages to customers using Termii's auto-generated messaging numbers that adapt to customers location.
Params:
number_to: str
The phone number the message should be sent to in international format. '+' should be excluded
message: str
The message to be sent.
"""
response = termii_switch.number_message_send(self.api_key, number_to, message)
return response
def send_device_template(self, phone_number, device_id, template_id, data):
"""
A method to set a device template for the one-time-passwords (pins) sent to their customers via whatsapp or sms.
Params:
phone_number: str
The destination phone number. Phone number must be in the international format without '+'
device_id: str
Represents the Device ID for Whatsapp. It can be Alphanumeric. It should be passed when the message is sent via whatsapp (It can be found on the manage device page on your Termii dashboard)
template_id: str
The ID of the template used
data: dict
Represents an object of key: value pair. The keys for the data object can be found on the device subscription page on your dashboard.
"""
response = termii_switch.template_setter(self.api_key, phone_number, device_id, template_id, data)
return response
def fetch_phonebooks(self):
"""
A method to get all the phonebooks associated to a termii client
"""
response = termii_switch.get_phonebooks(self.api_key)
return response
def create_phonebook(self, description, phonebook_name):
"""
A method to create a phonebook using the termii API
Params:
description: str
A description of the contacts stored in the phonebook
phonebook_name: str
The name of the phonebook
"""
response = termii_switch.make_phonebook(self.api_key, description, phonebook_name)
return response
def update_phonebook(self, phonebook_id, phonebook_name, phone_description):
"""
A method to update a phonebook using the termii API
Params:
phonebook_id: str
The id of the phonebook to be updated
phonebook_name: str
The new name of the phonebook
            phone_description: str
The new description of the phonebook
"""
response = termii_switch.patch_phonebook(self.api_key, phonebook_id, phonebook_name, phone_description)
return response
def delete_phonebook(self, phonebook_id):
"""
A method to delete a phonebook using the termii API
Params:
phonebook_id: str
                The id of the phonebook to be deleted
"""
response = termii_switch.remove_phonebook(self.api_key, phonebook_id)
return response
def fetch_contacts(self, phonebook_id):
"""
A method to get all the contacts associated to a termii phonebook
Params:
phonebook_id: str
The id of the phonebook
"""
response = termii_switch.get_contacts_from_phonebook(self.api_key, phonebook_id)
return response
def add_new_contact(self, phone_number, phonebook_id, country_code, options):
"""
A method to add a single contact to a phonebook using the termii API
Params:
phone_number: str
Phone number of the contact without international format.
phonebook_id: str
The id of the phonebook
country_code: str
The country code of the number to be added
options: dict
A dictionary containing certain options such as 'email_address', 'first_name', 'last_name' and 'company' which are all strings. An empty dictionary should be passed if there are no options.
"""
response = termii_switch.add_contact(self.api_key, phone_number, phonebook_id, country_code, options)
return response
def add_contacts(self, contact_file, country_code, extension, phonebook_id):
"""
A method to add contacts to a phonebook using the termii API
Params:
contact_file: str
File containing the list of contacts you want to add to your phonebook. Supported files include : 'txt', 'xlsx', and 'csv'.
country_code: str
Represents short numeric geographical codes developed to represent countries (Example: 234 ).
extension: str
The extension of the contact file: (Example: 'text/csv')
phonebook_id: str
The id of the phonebook
"""
response = termii_switch.add_many_contacts(self.api_key, contact_file, country_code, extension, phonebook_id)
return response
def delete_contact(self, contact_id):
"""
A method to delete contacts from a phonebook using the termii API
Params:
contact_id: str
The id of the contact to be deleted
"""
        response = termii_switch.delete_one_contact(self.api_key, contact_id)
        return response
# see lines 3273-3281 in calendrica-3.0.cl
def aberration(tee):
"""Return the aberration at moment, tee."""
c = julian_centuries(tee)
return ((deg(mpf(0.0000974)) *
cosine_degrees(deg(mpf(177.63)) + deg(mpf(35999.01848)) * c)) -
deg(mpf(0.005575)))
# see lines 3283-3295 in calendrica-3.0.cl
def solar_longitude_after(lam, tee):
"""Return the moment UT of the first time at or after moment, tee,
when the solar longitude will be lam degrees."""
rate = MEAN_TROPICAL_YEAR / deg(360)
tau = tee + rate * mod(lam - solar_longitude(tee), 360)
a = max(tee, tau - 5)
b = tau + 5
return invert_angular(solar_longitude, lam, a, b)
# see lines 3297-3300 in calendrica-3.0.cl
SPRING = deg(0)
# see lines 3302-3305 in calendrica-3.0.cl
SUMMER = deg(90)
# see lines 3307-3310 in calendrica-3.0.cl
AUTUMN = deg(180)
# see lines 3312-3315 in calendrica-3.0.cl
WINTER = deg(270)
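# Hedged usage sketch (not part of the original library): the four season
# constants above are meant to be passed to solar_longitude_after() to find
# the next equinox or solstice at or after a given moment.
def next_equinox_or_solstice(season, tee):
    """Return the moment UT at or after tee when the solar longitude next
    reaches 'season' (one of SPRING, SUMMER, AUTUMN, WINTER)."""
    return solar_longitude_after(season, tee)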
# see lines 3317-3339 in calendrica-3.0.cl
def precession(tee):
"""Return the precession at moment tee using 0,0 as J2000 coordinates.
Adapted from "Astronomical Algorithms" by <NAME>,
Willmann-Bell, Inc., 1991."""
c = julian_centuries(tee)
eta = mod(poly(c, [0,
secs(mpf(47.0029)),
secs(mpf(-0.03302)),
secs(mpf(0.000060))]),
360)
cap_P = mod(poly(c, [deg(mpf(174.876384)),
secs(mpf(-869.8089)),
secs(mpf(0.03536))]),
360)
p = mod(poly(c, [0,
secs(mpf(5029.0966)),
secs(mpf(1.11113)),
secs(mpf(0.000006))]),
360)
cap_A = cosine_degrees(eta) * sin_degrees(cap_P)
cap_B = cosine_degrees(cap_P)
arg = arctan_degrees(cap_A, cap_B)
return mod(p + cap_P - arg, 360)
# see lines 3341-3347 in calendrica-3.0.cl
def sidereal_solar_longitude(tee):
"""Return sidereal solar longitude at moment, tee."""
return mod(solar_longitude(tee) - precession(tee) + SIDEREAL_START, 360)
# see lines 3349-3365 in calendrica-3.0.cl
def estimate_prior_solar_longitude(lam, tee):
"""Return approximate moment at or before tee
when solar longitude just exceeded lam degrees."""
rate = MEAN_TROPICAL_YEAR / deg(360)
tau = tee - (rate * mod(solar_longitude(tee) - lam, 360))
cap_Delta = mod(solar_longitude(tau) - lam + deg(180), 360) - deg(180)
return min(tee, tau - (rate * cap_Delta))
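# For example, estimate_prior_solar_longitude(SPRING, tee) approximates the
# moment of the most recent March equinox at or before tee, a building block
# for solar-calendar new-year computations.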
# see lines 3367-3376 in calendrica-3.0.cl
def mean_lunar_longitude(c):
"""Return mean longitude of moon (in degrees) at moment
given in Julian centuries c (including the constant term of the
effect of the light-time (-0".70).
    Adapted from eq. 47.1 in "Astronomical Algorithms" by Jean Meeus,
    Willmann-Bell, Inc., 2nd ed. with corrections, 2005."""
return degrees(poly(c,deg([mpf(218.3164477), mpf(481267.88123421),
mpf(-0.0015786), mpf(1/538841),
mpf(-1/65194000)])))
# see lines 3378-3387 in calendrica-3.0.cl
def lunar_elongation(c):
"""Return elongation of moon (in degrees) at moment
given in Julian centuries c.
    Adapted from eq. 47.2 in "Astronomical Algorithms" by Jean Meeus,
    Willmann-Bell, Inc., 2nd ed. with corrections, 2005."""
return degrees(poly(c, deg([mpf(297.8501921), mpf(445267.1114034),
mpf(-0.0018819), mpf(1/545868),
mpf(-1/113065000)])))
# see lines 3389-3398 in calendrica-3.0.cl
def solar_anomaly(c):
"""Return mean anomaly of sun (in degrees) at moment
given in Julian centuries c.
    Adapted from eq. 47.3 in "Astronomical Algorithms" by Jean Meeus,
    Willmann-Bell, Inc., 2nd ed. with corrections, 2005."""
return degrees(poly(c,deg([mpf(357.5291092), mpf(35999.0502909),
mpf(-0.0001536), mpf(1/24490000)])))
# see lines 3400-3409 in calendrica-3.0.cl
def lunar_anomaly(c):
"""Return mean anomaly of moon (in degrees) at moment
given in Julian centuries c.
    Adapted from eq. 47.4 in "Astronomical Algorithms" by Jean Meeus,
    Willmann-Bell, Inc., 2nd ed. with corrections, 2005."""
return degrees(poly(c, deg([mpf(134.9633964), mpf(477198.8675055),
mpf(0.0087414), mpf(1/69699),
mpf(-1/14712000)])))
# see lines 3411-3420 in calendrica-3.0.cl
def moon_node(c):
"""Return Moon's argument of latitude (in degrees) at moment
given in Julian centuries 'c'.
    Adapted from eq. 47.5 in "Astronomical Algorithms" by Jean Meeus,
    Willmann-Bell, Inc., 2nd ed. with corrections, 2005."""
return degrees(poly(c, deg([mpf(93.2720950), mpf(483202.0175233),
mpf(-0.0036539), mpf(-1/3526000),
mpf(1/863310000)])))
# see lines 3422-3485 in calendrica-3.0.cl
def lunar_longitude(tee):
"""Return longitude of moon (in degrees) at moment tee.
Adapted from "Astronomical Algorithms" by <NAME>,
Willmann_Bell, Inc., 2nd ed., 1998."""
c = julian_centuries(tee)
cap_L_prime = mean_lunar_longitude(c)
cap_D = lunar_elongation(c)
cap_M = solar_anomaly(c)
cap_M_prime = lunar_anomaly(c)
cap_F = moon_node(c)
# see eq. 47.6 in Meeus
cap_E = poly(c, [1, mpf(-0.002516), mpf(-0.0000074)])
args_lunar_elongation = \
[0, 2, 2, 0, 0, 0, 2, 2, 2, 2, 0, 1, 0, 2, 0, 0, 4, 0, 4, 2, 2, 1,
1, 2, 2, 4, 2, 0, 2, 2, 1, 2, 0, 0, 2, 2, 2, 4, 0, 3, 2, 4, 0, 2,
2, 2, 4, 0, 4, 1, 2, 0, 1, 3, 4, 2, 0, 1, 2]
args_solar_anomaly = \
[0, 0, 0, 0, 1, 0, 0, -1, 0, -1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1,
0, 1, -1, 0, 0, 0, 1, 0, -1, 0, -2, 1, 2, -2, 0, 0, -1, 0, 0, 1,
-1, 2, 2, 1, -1, 0, 0, -1, 0, 1, 0, 1, 0, 0, -1, 2, 1, 0]
args_lunar_anomaly = \
[1, -1, 0, 2, 0, 0, -2, -1, 1, 0, -1, 0, 1, 0, 1, 1, -1, 3, -2,
-1, 0, -1, 0, 1, 2, 0, -3, -2, -1, -2, 1, 0, 2, 0, -1, 1, 0,
-1, 2, -1, 1, -2, -1, -1, -2, 0, 1, 4, 0, -2, 0, 2, 1, -2, -3,
2, 1, -1, 3]
args_moon_node = \
[0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, -2, 2, -2, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, -2, 2, 0, 2, 0, 0, 0, 0,
0, 0, -2, 0, 0, 0, 0, -2, -2, 0, 0, 0, 0, 0, 0, 0]
sine_coefficients = \
[6288774,1274027,658314,213618,-185116,-114332,
58793,57066,53322,45758,-40923,-34720,-30383,
15327,-12528,10980,10675,10034,8548,-7888,
-6766,-5163,4987,4036,3994,3861,3665,-2689,
-2602, 2390,-2348,2236,-2120,-2069,2048,-1773,
-1595,1215,-1110,-892,-810,759,-713,-700,691,
596,549,537,520,-487,-399,-381,351,-340,330,
327,-323,299,294]
correction = (deg(1/1000000) *
sigma([sine_coefficients, args_lunar_elongation,
args_solar_anomaly, args_lunar_anomaly,
args_moon_node],
lambda v, w, x, y, z:
v * pow(cap_E, abs(x)) *
sin_degrees((w * cap_D) +
(x * cap_M) +
(y * cap_M_prime) +
(z * cap_F))))
A1 = deg(mpf(119.75)) + (c * deg(mpf(131.849)))
venus = (deg(3958/1000000) * sin_degrees(A1))
A2 = deg(mpf(53.09)) + c * deg(mpf(479264.29))
jupiter = (deg(318/1000000) * sin_degrees(A2))
flat_earth = (deg(1962/1000000) * sin_degrees(cap_L_prime - cap_F))
return mod(cap_L_prime + correction + venus +
jupiter + flat_earth + nutation(tee), 360)
# see lines 3663-3732 in calendrica-3.0.cl
def lunar_latitude(tee):
"""Return the latitude of moon (in degrees) at moment, tee.
Adapted from "Astronomical Algorithms" by <NAME>,
Willmann_Bell, Inc., 1998."""
c = julian_centuries(tee)
cap_L_prime = mean_lunar_longitude(c)
cap_D = lunar_elongation(c)
cap_M = solar_anomaly(c)
cap_M_prime = lunar_anomaly(c)
cap_F = moon_node(c)
cap_E = poly(c, [1, mpf(-0.002516), mpf(-0.0000074)])
args_lunar_elongation = \
[0, 0, 0, 2, 2, 2, 2, 0, 2, 0, 2, 2, 2, 2, 2, 2, 2, 0, 4, 0, 0, 0,
1, 0, 0, 0, 1, 0, 4, 4, 0, 4, 2, 2, 2, 2, 0, 2, 2, 2, 2, 4, 2, 2,
0, 2, 1, 1, 0, 2, 1, 2, 0, 4, 4, 1, 4, 1, 4, 2]
args_solar_anomaly = \
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 1, -1, -1, -1, 1, 0, 1,
0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1, 1,
0, -1, -2, 0, 1, 1, 1, 1, 1, 0, -1, 1, 0, -1, 0, 0, 0, -1, -2]
args_lunar_anomaly = \
[0, 1, 1, 0, -1, -1, 0, 2, 1, 2, 0, -2, 1, 0, -1, 0, -1, -1, -1,
0, 0, -1, 0, 1, 1, 0, 0, 3, 0, -1, 1, -2, 0, 2, 1, -2, 3, 2, -3,
-1, 0, 0, 1, 0, 1, 1, 0, 0, -2, -1, 1, -2, 2, -2, -1, 1, 1, -2,
0, 0]
args_moon_node = \
[1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1,
-1, 1, 3, 1, 1, 1, -1, -1, -1, 1, -1, 1, -3, 1, -3, -1, -1, 1,
-1, 1, -1, 1, 1, 1, 1, -1, 3, -1, -1, 1, -1, -1, 1, -1, 1, -1,
-1, -1, -1, -1, -1, 1]
sine_coefficients = \
[5128122, 280602, 277693, 173237, 55413, 46271, 32573,
17198, 9266, 8822, 8216, 4324, 4200, -3359, 2463, 2211,
2065, -1870, 1828, -1794, -1749, -1565, -1491, -1475,
-1410, -1344, -1335, 1107, 1021, 833, 777, 671, 607,
596, 491, -451, 439, 422, 421, -366, -351, 331, 315,
302, -283, -229, 223, 223, -220, -220, -185, 181,
-177, 176, 166, -164, 132, -119, 115, 107]
beta = (deg(1/1000000) *
sigma([sine_coefficients,
args_lunar_elongation,
args_solar_anomaly,
args_lunar_anomaly,
args_moon_node],
lambda v, w, x, y, z: (v *
pow(cap_E, abs(x)) *
sin_degrees((w * cap_D) +
(x * cap_M) +
(y * cap_M_prime) +
(z * cap_F)))))
venus = (deg(175/1000000) *
             (sin_degrees(deg(mpf(119.75))
import os
import numpy as np
from PIL import Image
import torch
import torchvision
from torchvision import transforms
import torchvision.models as models
from torchvision.datasets import ImageFolder
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, random_split
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
import seaborn as sns
import time
import pandas as pd
import matplotlib.pyplot as plt
import splitfolders
from plotting import make_plots, plot_matrix
def preprocessing():
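    """Build train/dev/test DataLoaders from ImageFolder directories
    ('train/', 'val_test/val/', 'val_test/test/') with ImageNet-style
    normalization; the training set additionally gets random rotation and
    horizontal flips."""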
batch_size = 64
train_transform = transforms.Compose([transforms.Resize(224),
transforms.RandomRotation(45),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
val_transform = transforms.Compose([transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
train_dataset = ImageFolder(root='train/', transform=train_transform)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=16, pin_memory=True)
dev_dataset = ImageFolder(root='val_test/val/', transform=val_transform)
dev_loader = DataLoader(dev_dataset, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)
test_dataset = ImageFolder(root='val_test/test/', transform=val_transform)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)
print('train dataset: {} images {} classes'.format(len(train_dataset), len(train_dataset.classes)))
print('dev dataset: {} images {} classes'.format(len(dev_dataset), len(dev_dataset.classes)))
print('test dataset: {} images {} classes'.format(len(test_dataset), len(test_dataset.classes)))
return train_loader, dev_loader, test_loader
def valid_average_ensemble(model_list, model_weights, val_loader, criterion, device, dataset, empty_cache=False):
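    """Evaluate a soft-voting ensemble: each model's outputs are scaled by its
    weight from model_weights, averaged across models, and the argmax of the
    softmax of that average is the prediction. Returns (mean loss, accuracy,
    confusion matrix)."""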
start = time.time()
test_loss = []
accuracy = 0
total = 0
true_label = []
pred_label = []
with torch.no_grad():
for batch_num, (feats, labels) in enumerate(val_loader):
feats, labels = feats.to(device), labels.to(device)
output_list = []
for name, model in model_list.items():
model.eval()
outputs = model(feats.float())
output_list.append(outputs * model_weights[name])
output_list = torch.stack(output_list)
output_list = torch.mean(output_list, dim=0)
pred_labels = torch.max(F.softmax(output_list, dim=1), 1)[1]
pred_labels = pred_labels.view(-1)
loss = criterion(output_list, labels.long())
accuracy += torch.sum(torch.eq(pred_labels, labels)).item()
total += len(labels)
test_loss.extend([loss.item()]*feats.size()[0])
pred_label.extend(pred_labels.detach().cpu().numpy())
true_label.extend(labels.detach().cpu().numpy())
if empty_cache:
torch.cuda.empty_cache()
del feats
del labels
del loss
matrix = confusion_matrix(true_label, pred_label)
end = time.time()
print('{} Validation Time: {:.4f}'.format(dataset, end-start))
return np.mean(test_loss), accuracy/total, matrix
def valid_mode_ensemble(model_list, val_loader, criterion, device, dataset, empty_cache=False):
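    """Evaluate a hard-voting (majority) ensemble: each model predicts
    independently and the per-sample mode of the predicted labels is taken;
    the reported loss is the mean of the individual models' losses.
    Returns (mean loss, accuracy, confusion matrix)."""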
start = time.time()
test_loss = []
accuracy = 0
total = 0
true_label = []
pred_label = []
with torch.no_grad():
for batch_num, (feats, labels) in enumerate(val_loader):
feats, labels = feats.to(device), labels.to(device)
pred_list = []
avg_loss = []
for name, model in model_list.items():
model.eval()
outputs = model(feats.float())
loss = criterion(outputs, labels.long())
avg_loss.append(loss.item())
pred_labels = torch.max(F.softmax(outputs, dim=1), 1)[1]
pred_labels = pred_labels.view(-1)
pred_list.append(pred_labels)
avg_loss = np.mean(avg_loss)
pred_list = torch.stack(pred_list)
pred_list = torch.mode(pred_list, dim=0)[0]
pred_label.extend(pred_list.detach().cpu().numpy())
true_label.extend(labels.detach().cpu().numpy())
accuracy += torch.sum(torch.eq(pred_list, labels)).item()
total += len(labels)
test_loss.extend([avg_loss]*feats.size()[0])
if empty_cache:
torch.cuda.empty_cache()
del feats
del labels
del loss
matrix = confusion_matrix(true_label, pred_label)
end = time.time()
print('{} Validation Time: {:.4f}'.format(dataset, end-start))
return np.mean(test_loss), accuracy/total, matrix
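# Illustrative usage sketch (not part of the original script): once trained
# models are collected in a dict like model_list below, the two strategies
# can be compared. The uniform model_weights here is an assumption, not a
# tuned setting.
#
#   model_weights = {name: 1.0 for name in model_list}
#   avg_loss, avg_acc, _ = valid_average_ensemble(
#       model_list, model_weights, dev_loader, criterion, device, 'Validation Dataset')
#   mode_loss, mode_acc, _ = valid_mode_ensemble(
#       model_list, dev_loader, criterion, device, 'Validation Dataset')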
def plot_ensembles(a, b):
plt.figure(1)
plt.plot(range(1, len(a) + 1), a, 'b', label='valid')
plt.plot(range(1, len(b) + 1), b, 'g', label='test')
plt.title('Ensemble Model Validation/Test Accuracy')
plt.xlabel('Number of Models')
plt.ylabel('Accuracy')
plt.legend()
plt.savefig('ensemble_accuracy.png')
plt.show()
if __name__ == '__main__':
# run only once to set up validation and test folders
splitfolders.ratio('test', output='val_test', seed=1337, ratio=(0, 0.5, 0.5), group_prefix=None)
# check for GPU
cuda = torch.cuda.is_available()
device = torch.device('cuda' if cuda else 'cpu')
print(device)
train_loader, dev_loader, test_loader = preprocessing()
# load individual models
num_classes = 7
train_accuracy = {}
valid_accuracy = {}
test_accuracy = {}
model_list = {}
criterion = nn.CrossEntropyLoss()
empty_cache = True
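    # The per-architecture blocks below all follow the same pattern: rebuild
    # the torchvision model, replace its final classifier layer with a
    # num_classes-way linear layer, load the '<name>.model' checkpoint, move
    # the model to the device, plot its curves, and evaluate it with valid()
    # before adding it to model_list for ensembling. valid() and the
    # checkpoints are assumed to come from a separate training script; they
    # are not defined in this file.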
model_name = 'ResNet18'
resnet18 = models.resnet18()
resnet18.fc = nn.Linear(512, num_classes)
if os.path.exists('{}.model'.format(model_name)):
print('Found pretrained model!')
saved_model = torch.load('{}.model'.format(model_name))
resnet18.load_state_dict(saved_model['model'])
model_list[model_name] = resnet18
train_loss = saved_model['train_losses']
valid_loss = saved_model['valid_losses']
test_loss = saved_model['test_losses']
train_accuracy[model_name] = saved_model['train_accuracy']
valid_accuracy[model_name] = saved_model['valid_accuracy']
test_accuracy[model_name] = saved_model['test_accuracy']
resnet18.to(device)
resnet18.eval()
print('{} Test Loss: {}'.format(model_name, test_loss[-1]))
make_plots(train_accuracy[model_name], valid_accuracy[model_name], model_name, 'Accuracy')
make_plots(train_loss, valid_loss, model_name, 'Loss')
loss, acc, matrix = valid(resnet18, dev_loader, criterion, device, 0, 'Validation Dataset', empty_cache)
plot_matrix(matrix, 'Validation', model_name)
loss, acc, matrix = valid(resnet18, test_loader, criterion, device, 0, 'Test Dataset', empty_cache)
plot_matrix(matrix, 'Test', model_name)
model_name = 'ResNet34'
resnet34 = models.resnet34()
resnet34.fc = nn.Linear(512, num_classes)
if os.path.exists('{}.model'.format(model_name)):
print('Found pretrained model!')
saved_model = torch.load('{}.model'.format(model_name))
resnet34.load_state_dict(saved_model['model'])
model_list[model_name] = resnet34
train_loss = saved_model['train_losses']
valid_loss = saved_model['valid_losses']
test_loss = saved_model['test_losses']
train_accuracy[model_name] = saved_model['train_accuracy']
valid_accuracy[model_name] = saved_model['valid_accuracy']
test_accuracy[model_name] = saved_model['test_accuracy']
resnet34.to(device)
resnet34.eval()
print('{} Test Loss: {}'.format(model_name, test_loss[-1]))
make_plots(train_accuracy[model_name], valid_accuracy[model_name], model_name, 'Accuracy')
make_plots(train_loss, valid_loss, model_name, 'Loss')
loss, acc, matrix = valid(resnet34, dev_loader, criterion, device, 0, 'Validation Dataset', empty_cache)
plot_matrix(matrix, 'Validation', model_name)
loss, acc, matrix = valid(resnet34, test_loader, criterion, device, 0, 'Test Dataset', empty_cache)
plot_matrix(matrix, 'Test', model_name)
model_name = 'ResNet50'
resnet50 = models.resnet50()
resnet50.fc = nn.Linear(2048, num_classes)
if os.path.exists('{}.model'.format(model_name)):
print('Found pretrained model!')
saved_model = torch.load('{}.model'.format(model_name))
resnet50.load_state_dict(saved_model['model'])
model_list[model_name] = resnet50
train_loss = saved_model['train_losses']
valid_loss = saved_model['valid_losses']
test_loss = saved_model['test_losses']
train_accuracy[model_name] = saved_model['train_accuracy']
valid_accuracy[model_name] = saved_model['valid_accuracy']
test_accuracy[model_name] = saved_model['test_accuracy']
resnet50.to(device)
resnet50.eval()
print('{} Test Loss: {}'.format(model_name, test_loss[-1]))
make_plots(train_accuracy[model_name], valid_accuracy[model_name], model_name, 'Accuracy')
make_plots(train_loss, valid_loss, model_name, 'Loss')
loss, acc, matrix = valid(resnet50, dev_loader, criterion, device, 0, 'Validation Dataset', empty_cache)
plot_matrix(matrix, 'Validation', model_name)
loss, acc, matrix = valid(resnet50, test_loader, criterion, device, 0, 'Test Dataset', empty_cache)
plot_matrix(matrix, 'Test', model_name)
model_name = 'ResNet101'
resnet101 = models.resnet101()
resnet101.fc = nn.Linear(2048, num_classes)
if os.path.exists('{}.model'.format(model_name)):
print('Found pretrained model!')
saved_model = torch.load('{}.model'.format(model_name))
resnet101.load_state_dict(saved_model['model'])
model_list[model_name] = resnet101
train_loss = saved_model['train_losses']
valid_loss = saved_model['valid_losses']
test_loss = saved_model['test_losses']
train_accuracy[model_name] = saved_model['train_accuracy']
valid_accuracy[model_name] = saved_model['valid_accuracy']
test_accuracy[model_name] = saved_model['test_accuracy']
resnet101.to(device)
resnet101.eval()
print('{} Test Loss: {}'.format(model_name, test_loss[-1]))
make_plots(train_accuracy[model_name], valid_accuracy[model_name], model_name, 'Accuracy')
make_plots(train_loss, valid_loss, model_name, 'Loss')
loss, acc, matrix = valid(resnet101, dev_loader, criterion, device, 0, 'Validation Dataset', empty_cache)
plot_matrix(matrix, 'Validation', model_name)
loss, acc, matrix = valid(resnet101, test_loader, criterion, device, 0, 'Test Dataset', empty_cache)
plot_matrix(matrix, 'Test', model_name)
model_name = 'ResNet152'
resnet152 = models.resnet152()
resnet152.fc = nn.Linear(2048, num_classes)
if os.path.exists('{}.model'.format(model_name)):
print('Found pretrained model!')
saved_model = torch.load('{}.model'.format(model_name))
resnet152.load_state_dict(saved_model['model'])
model_list[model_name] = resnet152
train_loss = saved_model['train_losses']
valid_loss = saved_model['valid_losses']
test_loss = saved_model['test_losses']
train_accuracy[model_name] = saved_model['train_accuracy']
valid_accuracy[model_name] = saved_model['valid_accuracy']
test_accuracy[model_name] = saved_model['test_accuracy']
resnet152.to(device)
resnet152.eval()
print('{} Test Loss: {}'.format(model_name, test_loss[-1]))
make_plots(train_accuracy[model_name], valid_accuracy[model_name], model_name, 'Accuracy')
make_plots(train_loss, valid_loss, model_name, 'Loss')
loss, acc, matrix = valid(resnet152, dev_loader, criterion, device, 0, 'Validation Dataset', empty_cache)
plot_matrix(matrix, 'Validation', model_name)
loss, acc, matrix = valid(resnet152, test_loader, criterion, device, 0, 'Test Dataset', empty_cache)
plot_matrix(matrix, 'Test', model_name)
model_name = 'ResNeXt50'
resnext50 = models.resnext50_32x4d()
resnext50.fc = nn.Linear(2048, num_classes)
if os.path.exists('{}.model'.format(model_name)):
print('Found pretrained model!')
saved_model = torch.load('{}.model'.format(model_name))
resnext50.load_state_dict(saved_model['model'])
model_list[model_name] = resnext50
train_loss = saved_model['train_losses']
valid_loss = saved_model['valid_losses']
test_loss = saved_model['test_losses']
train_accuracy[model_name] = saved_model['train_accuracy']
valid_accuracy[model_name] = saved_model['valid_accuracy']
test_accuracy[model_name] = saved_model['test_accuracy']
resnext50.to(device)
resnext50.eval()
print('{} Test Loss: {}'.format(model_name, test_loss[-1]))
make_plots(train_accuracy[model_name], valid_accuracy[model_name], model_name, 'Accuracy')
make_plots(train_loss, valid_loss, model_name, 'Loss')
loss, acc, matrix = valid(resnext50, dev_loader, criterion, device, 0, 'Validation Dataset', empty_cache)
plot_matrix(matrix, 'Validation', model_name)
loss, acc, matrix = valid(resnext50, test_loader, criterion, device, 0, 'Test Dataset', empty_cache)
plot_matrix(matrix, 'Test', model_name)
model_name = 'ResNeXt101'
resnext101 = models.resnext101_32x8d()
resnext101.fc = nn.Linear(2048, num_classes)
if os.path.exists('{}.model'.format(model_name)):
print('Found pretrained model!')
saved_model = torch.load('{}.model'.format(model_name))
resnext101.load_state_dict(saved_model['model'])
model_list[model_name] = resnext101
train_loss = saved_model['train_losses']
valid_loss = saved_model['valid_losses']
test_loss = saved_model['test_losses']
train_accuracy[model_name] = saved_model['train_accuracy']
valid_accuracy[model_name] = saved_model['valid_accuracy']
test_accuracy[model_name] = saved_model['test_accuracy']
resnext101.to(device)
resnext101.eval()
print('{} Test Loss: {}'.format(model_name, test_loss[-1]))
make_plots(train_accuracy[model_name], valid_accuracy[model_name], model_name, 'Accuracy')
make_plots(train_loss, valid_loss, model_name, 'Loss')
loss, acc, matrix = valid(resnext101, dev_loader, criterion, device, 0, 'Validation Dataset', empty_cache)
plot_matrix(matrix, 'Validation', model_name)
loss, acc, matrix = valid(resnext101, test_loader, criterion, device, 0, 'Test Dataset', empty_cache)
plot_matrix(matrix, 'Test', model_name)
model_name = 'Wide_ResNet50'
wide_resnet50 = models.wide_resnet50_2()
wide_resnet50.fc = nn.Linear(2048, num_classes)
if os.path.exists('{}.model'.format(model_name)):
print('Found pretrained model!')
saved_model = torch.load('{}.model'.format(model_name))
wide_resnet50.load_state_dict(saved_model['model'])
model_list[model_name] = wide_resnet50
train_loss = saved_model['train_losses']
valid_loss = saved_model['valid_losses']
test_loss = saved_model['test_losses']
train_accuracy[model_name] = saved_model['train_accuracy']
valid_accuracy[model_name] = saved_model['valid_accuracy']
test_accuracy[model_name] = saved_model['test_accuracy']
wide_resnet50.to(device)
wide_resnet50.eval()
print('{} Test Loss: {}'.format(model_name, test_loss[-1]))
make_plots(train_accuracy[model_name], valid_accuracy[model_name], model_name, 'Accuracy')
make_plots(train_loss, valid_loss, model_name, 'Loss')
loss, acc, matrix = valid(wide_resnet50, dev_loader, criterion, device, 0, 'Validation Dataset', empty_cache)
plot_matrix(matrix, 'Validation', model_name)
loss, acc, matrix = valid(wide_resnet50, test_loader, criterion, device, 0, 'Test Dataset', empty_cache)
plot_matrix(matrix, 'Test', model_name)
model_name = 'Wide_ResNet101'
wide_resnet101 = models.wide_resnet101_2()
wide_resnet101.fc = nn.Linear(2048, num_classes)
if os.path.exists('{}.model'.format(model_name)):
print('Found pretrained model!')
saved_model = torch.load('{}.model'.format(model_name))
wide_resnet101.load_state_dict(saved_model['model'])
model_list[model_name] = wide_resnet101
train_loss = saved_model['train_losses']
valid_loss = saved_model['valid_losses']
test_loss = saved_model['test_losses']
train_accuracy[model_name] = saved_model['train_accuracy']
valid_accuracy[model_name] = saved_model['valid_accuracy']
test_accuracy[model_name] = saved_model['test_accuracy']
wide_resnet101.to(device)
wide_resnet101.eval()
print('{} Test Loss: {}'.format(model_name, test_loss[-1]))
make_plots(train_accuracy[model_name], valid_accuracy[model_name], model_name, 'Accuracy')
make_plots(train_loss, valid_loss, model_name, 'Loss')
loss, acc, matrix = valid(wide_resnet101, dev_loader, criterion, device, 0, 'Validation Dataset', empty_cache)
plot_matrix(matrix, 'Validation', model_name)
loss, acc, matrix = valid(wide_resnet101, test_loader, criterion, device, 0, 'Test Dataset', empty_cache)
plot_matrix(matrix, 'Test', model_name)
model_name = 'DenseNet121'
densenet121 = models.densenet121()
densenet121.classifier = nn.Linear(1024, num_classes)
if os.path.exists('{}.model'.format(model_name)):
print('Found pretrained model!')
saved_model = torch.load('{}.model'.format(model_name))
densenet121.load_state_dict(saved_model['model'])
model_list[model_name] = densenet121
train_loss = saved_model['train_losses']
valid_loss = saved_model['valid_losses']
test_loss = saved_model['test_losses']
train_accuracy[model_name] = saved_model['train_accuracy']
valid_accuracy[model_name] = saved_model['valid_accuracy']
test_accuracy[model_name] = saved_model['test_accuracy']
densenet121.to(device)
densenet121.eval()
print('{} Test Loss: {}'.format(model_name, test_loss[-1]))
make_plots(train_accuracy[model_name], valid_accuracy[model_name], model_name, 'Accuracy')
make_plots(train_loss, valid_loss, model_name, 'Loss')
        loss, acc, matrix = valid(densenet121, dev_loader, criterion, device, 0, 'Validation Dataset', empty_cache)
self.SetUseTabs(False)
self.SetMargins(2, 2)
self.SetMarginWidth(1, 1)
def setCmdKeys(self):
self.CmdKeyAssign(ord("="), stc.STC_SCMOD_CTRL, stc.STC_CMD_ZOOMIN)
self.CmdKeyAssign(ord("-"), stc.STC_SCMOD_CTRL, stc.STC_CMD_ZOOMOUT)
def setStyle(self):
self.SetLexer(wx.stc.STC_LEX_CONTAINER)
self.SetStyleBits(5)
self.Bind(wx.stc.EVT_STC_STYLENEEDED, self.OnStyling)
self.SetCaretForeground("#000000")
self.SetCaretWidth(2)
# Global default styles for all languages
self.StyleSetSpec(stc.STC_STYLE_DEFAULT, "face:%(mono)s,size:%(size)d" % self.faces)
self.StyleClearAll()
self.StyleSetSpec(stc.STC_STYLE_DEFAULT, "face:%(mono)s,size:%(size)d" % self.faces)
self.StyleSetSpec(stc.STC_STYLE_CONTROLCHAR, "face:%(mono)s" % self.faces)
self.StyleSetSpec(stc.STC_STYLE_BRACELIGHT, "fore:#FFFFFF,back:#0000FF,bold")
self.StyleSetSpec(stc.STC_STYLE_BRACEBAD, "fore:#000000,back:#FF0000,bold")
# MML specific styles
self.StyleSetSpec(self.lexer.STC_MML_DEFAULT, "fore:#000000,face:%(mono)s,size:%(size)d" % self.faces)
self.StyleSetSpec(self.lexer.STC_MML_KEYWORD, "fore:#3300DD,face:%(mono)s,size:%(size)d,bold" % self.faces)
self.StyleSetSpec(self.lexer.STC_MML_KEYWORD2, "fore:#0033FF,face:%(mono)s,size:%(size)d,bold" % self.faces)
self.StyleSetSpec(self.lexer.STC_MML_VARIABLE, "fore:#006600,face:%(mono)s,size:%(size)d,bold" % self.faces)
self.StyleSetSpec(self.lexer.STC_MML_VOICE_TOKEN, "fore:#555500,face:%(mono)s,size:%(size)d,bold" % self.faces)
self.StyleSetSpec(self.lexer.STC_MML_COMMENT, "fore:#444444,face:%(mono)s,size:%(size)d,italic" % self.faces)
self.SetSelBackground(1, "#CCCCDD")
def OnStyling(self, evt):
self.lexer.StyleText(evt)
def loadfile(self, filename):
self.LoadFile(filename)
self.currentfile = filename
self.GetParent().SetTitle(self.currentfile)
def savefile(self, filename):
self.currentfile = filename
self.GetParent().SetTitle(self.currentfile)
self.SaveFile(filename)
self.OnUpdateUI(None)
def OnUpdateUI(self, evt):
# check for matching braces
braceAtCaret = -1
braceOpposite = -1
charBefore = None
caretPos = self.GetCurrentPos()
if caretPos > 0:
charBefore = self.GetCharAt(caretPos - 1)
styleBefore = self.GetStyleAt(caretPos - 1)
# check before
if charBefore and chr(charBefore) in "[]{}()":
braceAtCaret = caretPos - 1
# check after
if braceAtCaret < 0:
charAfter = self.GetCharAt(caretPos)
styleAfter = self.GetStyleAt(caretPos)
if charAfter and chr(charAfter) in "[]{}()":
braceAtCaret = caretPos
if braceAtCaret >= 0:
braceOpposite = self.BraceMatch(braceAtCaret)
if braceAtCaret != -1 and braceOpposite == -1:
self.BraceBadLight(braceAtCaret)
else:
self.BraceHighlight(braceAtCaret, braceOpposite)
# Check if horizontal scrollbar is needed
self.checkScrollbar()
def checkScrollbar(self):
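        """Enable the horizontal scrollbar only when the longest line in the
        buffer is wider than the visible editor area."""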
lineslength = [self.LineLength(i) + 1 for i in range(self.GetLineCount())]
maxlength = max(lineslength)
width = self.GetCharWidth() + (self.GetZoom() * 0.5)
if (self.GetSize()[0]) < (maxlength * width):
self.SetUseHorizontalScrollBar(True)
else:
self.SetUseHorizontalScrollBar(False)
def onExecute(self, evt):
pos = self.GetCurrentPos()
self.obj.music = self.GetText()
self.SetCurrentPos(pos)
self.SetSelection(pos, pos)
class MMLEditorFrame(wx.Frame):
def __init__(self, parent=None, obj=None):
wx.Frame.__init__(self, parent, size=(650, 450))
self.obj = obj
self.obj._editor = self
self.editor = MMLEditor(self, -1, self.obj)
self.menubar = wx.MenuBar()
self.fileMenu = wx.Menu()
self.fileMenu.Append(wx.ID_OPEN, "Open\tCtrl+O")
self.Bind(wx.EVT_MENU, self.open, id=wx.ID_OPEN)
self.fileMenu.Append(wx.ID_CLOSE, "Close\tCtrl+W", kind=wx.ITEM_NORMAL)
self.Bind(wx.EVT_MENU, self.close, id=wx.ID_CLOSE)
self.fileMenu.AppendSeparator()
self.fileMenu.Append(wx.ID_SAVE, "Save\tCtrl+S")
self.Bind(wx.EVT_MENU, self.save, id=wx.ID_SAVE)
self.fileMenu.Append(wx.ID_SAVEAS, "Save As...\tShift+Ctrl+S")
self.Bind(wx.EVT_MENU, self.saveas, id=wx.ID_SAVEAS)
self.menubar.Append(self.fileMenu, "&File")
self.SetMenuBar(self.menubar)
def open(self, evt):
dlg = wx.FileDialog(
self, message="Choose a file", defaultDir=os.path.expanduser("~"), defaultFile="", style=wx.FD_OPEN
)
if dlg.ShowModal() == wx.ID_OK:
path = ensureNFD(dlg.GetPath())
self.editor.loadfile(path)
dlg.Destroy()
def close(self, evt):
self.obj._editor = None
self.Destroy()
def save(self, evt):
path = self.editor.currentfile
if not path:
self.saveas(None)
else:
self.editor.savefile(path)
def saveas(self, evt):
deffile = os.path.split(self.editor.currentfile)[1]
dlg = wx.FileDialog(
self, message="Save file as ...", defaultDir=os.path.expanduser("~"), defaultFile=deffile, style=wx.FD_SAVE
)
dlg.SetFilterIndex(0)
if dlg.ShowModal() == wx.ID_OK:
path = ensureNFD(dlg.GetPath())
self.editor.savefile(path)
dlg.Destroy()
def update(self, text):
self.editor.SetText(text)
class Keyboard(wx.Panel):
def __init__(
self,
parent,
id=wx.ID_ANY,
pos=wx.DefaultPosition,
size=wx.DefaultSize,
poly=64,
outFunction=None,
style=wx.TAB_TRAVERSAL,
):
wx.Panel.__init__(self, parent, id, pos, size, style)
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.SetBackgroundColour(BACKGROUND_COLOUR)
self.parent = parent
self.outFunction = outFunction
self.poly = poly
self.gap = 0
self.offset = 12
self.w1 = 15
self.w2 = int(self.w1 / 2) + 1
self.hold = 1
self.keyPressed = None
self.Bind(wx.EVT_LEFT_DOWN, self.MouseDown)
self.Bind(wx.EVT_LEFT_UP, self.MouseUp)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.white = (0, 2, 4, 5, 7, 9, 11)
self.black = (1, 3, 6, 8, 10)
self.whiteSelected = []
self.blackSelected = []
self.whiteVelocities = {}
self.blackVelocities = {}
self.whiteKeys = []
self.blackKeys = []
self.offRec = wx.Rect(900 - 55, 0, 28, 150)
self.holdRec = wx.Rect(900 - 27, 0, 27, 150)
self.keydown = []
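        # Computer-keyboard-to-MIDI map: key codes from the Z row
        # (Z, S, X, D, C, ...) give MIDI notes 36-52 and the Q row
        # (Q, 2, W, 3, E, ...) gives notes 60-76, two playable octaves.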
self.keymap = {
90: 36,
83: 37,
88: 38,
68: 39,
67: 40,
86: 41,
71: 42,
66: 43,
72: 44,
78: 45,
74: 46,
77: 47,
44: 48,
76: 49,
46: 50,
59: 51,
47: 52,
81: 60,
50: 61,
87: 62,
51: 63,
69: 64,
82: 65,
53: 66,
84: 67,
54: 68,
89: 69,
55: 70,
85: 71,
73: 72,
57: 73,
79: 74,
48: 75,
80: 76,
}
wx.CallAfter(self._setRects)
def getCurrentNotes(self):
"Returns a list of the current notes."
notes = []
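        # Key indices are mapped back to MIDI pitches: semitone within the
        # octave (from self.white / self.black) + 12 per octave + self.offset;
        # stored velocities are inverted, hence the 127 - value below.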
for key in self.whiteSelected:
notes.append((self.white[key % 7] + int(key / 7) * 12 + self.offset, 127 - self.whiteVelocities[key]))
for key in self.blackSelected:
notes.append((self.black[key % 5] + int(key / 5) * 12 + self.offset, 127 - self.blackVelocities[key]))
notes.sort()
return notes
def reset(self):
"Resets the keyboard state."
for key in self.blackSelected:
pit = self.black[key % 5] + int(key / 5) * 12 + self.offset
note = (pit, 0)
if self.outFunction:
self.outFunction(note)
for key in self.whiteSelected:
pit = self.white[key % 7] + int(key / 7) * 12 + self.offset
note = (pit, 0)
if self.outFunction:
self.outFunction(note)
self.whiteSelected = []
self.blackSelected = []
self.whiteVelocities = {}
self.blackVelocities = {}
wx.CallAfter(self.Refresh)
def setPoly(self, poly):
"Sets the maximum number of notes that can be held at the same time."
self.poly = poly
def _setRects(self):
w, h = self.GetSize()
self.offRec = wx.Rect(w - 55, 0, 28, h)
self.holdRec = wx.Rect(w - 27, 0, 27, h)
num = int(w / self.w1)
self.gap = w - num * self.w1
self.whiteKeys = [wx.Rect(i * self.w1, 0, self.w1 - 1, h - 1) for i in range(num)]
self.blackKeys = []
height2 = int(h * 4 / 7)
for i in range(int(num / 7) + 1):
space2 = self.w1 * 7 * i
off = int(self.w1 / 2) + space2 + 3
self.blackKeys.append(wx.Rect(off, 0, self.w2, height2))
off += self.w1
self.blackKeys.append(wx.Rect(off, 0, self.w2, height2))
off += self.w1 * 2
self.blackKeys.append(wx.Rect(off, 0, self.w2, height2))
off += self.w1
self.blackKeys.append(wx.Rect(off, 0, self.w2, height2))
off += self.w1
self.blackKeys.append(wx.Rect(off, 0, self.w2, height2))
wx.CallAfter(self.Refresh)
def OnSize(self, evt):
self._setRects()
wx.CallAfter(self.Refresh)
evt.Skip()
def OnKeyDown(self, evt):
if evt.HasAnyModifiers():
evt.Skip()
return
if evt.GetKeyCode() in self.keymap and evt.GetKeyCode() not in self.keydown:
self.keydown.append(evt.GetKeyCode())
pit = self.keymap[evt.GetKeyCode()]
deg = pit % 12
total = len(self.blackSelected) + len(self.whiteSelected)
note = None
if self.hold:
if deg in self.black:
which = self.black.index(deg) + int((pit - self.offset) / 12) * 5
if which in self.blackSelected:
self.blackSelected.remove(which)
del self.blackVelocities[which]
note = (pit, 0)
else:
if total < self.poly:
self.blackSelected.append(which)
self.blackVelocities[which] = 100
note = (pit, 100)
elif deg in self.white:
which = self.white.index(deg) + int((pit - self.offset) / 12) * 7
if which in self.whiteSelected:
self.whiteSelected.remove(which)
del self.whiteVelocities[which]
note = (pit, 0)
else:
if total < self.poly:
self.whiteSelected.append(which)
self.whiteVelocities[which] = 100
note = (pit, 100)
else:
if deg in self.black:
which = self.black.index(deg) + int((pit - self.offset) / 12) * 5
if which not in self.blackSelected and total < self.poly:
self.blackSelected.append(which)
self.blackVelocities[which] = 100
note = (pit, 100)
elif deg in self.white:
which = self.white.index(deg) + int((pit - self.offset) / 12) * 7
if which not in self.whiteSelected and total < self.poly:
self.whiteSelected.append(which)
self.whiteVelocities[which] = 100
note = (pit, 100)
if note and self.outFunction and total < self.poly:
self.outFunction(note)
wx.CallAfter(self.Refresh)
evt.Skip()
def OnKeyUp(self, evt):
if evt.HasAnyModifiers():
evt.Skip()
return
if evt.GetKeyCode() in self.keydown:
del self.keydown[self.keydown.index(evt.GetKeyCode())]
if not self.hold and evt.GetKeyCode() in self.keymap:
pit = self.keymap[evt.GetKeyCode()]
deg = pit % 12
note = None
if deg in self.black:
which = self.black.index(deg) + int((pit - self.offset) / 12) * 5
if which in self.blackSelected:
self.blackSelected.remove(which)
del self.blackVelocities[which]
note = (pit, 0)
elif deg in self.white:
which = self.white.index(deg) + int((pit - self.offset) / 12) * 7
if which in self.whiteSelected:
self.whiteSelected.remove(which)
del self.whiteVelocities[which]
note = (pit, 0)
if note and self.outFunction:
self.outFunction(note)
wx.CallAfter(self.Refresh)
evt.Skip()
def MouseUp(self, evt):
if not self.hold and self.keyPressed is not None:
key = self.keyPressed[0]
pit = self.keyPressed[1]
if key in self.blackSelected:
self.blackSelected.remove(key)
del self.blackVelocities[key]
if key in self.whiteSelected:
self.whiteSelected.remove(key)
del self.whiteVelocities[key]
note = (pit, 0)
if self.outFunction:
self.outFunction(note)
self.keyPressed = None
wx.CallAfter(self.Refresh)
evt.Skip()
def MouseDown(self, evt):
w, h = self.GetSize()
pos = evt.GetPosition()
if self.holdRec.Contains(pos):
if self.hold:
self.hold = 0
self.reset()
else:
self.hold = 1
wx.CallAfter(self.Refresh)
return
if self.offUpRec.Contains(pos):
self.offset += 12
if self.offset > 60:
self.offset = 60
wx.CallAfter(self.Refresh)
return
if self.offDownRec.Contains(pos):
self.offset -= 12
if self.offset < 0:
self.offset = 0
wx.CallAfter(self.Refresh)
return
total = len(self.blackSelected) + len(self.whiteSelected)
scanWhite = True
note = None
if self.hold:
for i, rec in enumerate(self.blackKeys):
if rec.Contains(pos):
pit = self.black[i % 5] + int(i / 5) * 12 + self.offset
if i in self.blackSelected:
self.blackSelected.remove(i)
del self.blackVelocities[i]
vel = 0
else:
hb = int(h * 4 / 7)
vel = int((hb - pos[1]) * 127 / hb)
if total < self.poly:
self.blackSelected.append(i)
self.blackVelocities[i] = int(127 - vel)
note = (pit, vel)
scanWhite = False
break
if scanWhite:
for i, rec in enumerate(self.whiteKeys):
if rec.Contains(pos):
pit = self.white[i % 7] + | |
M N P Q R S T V W X Y Z""",
)
mat = SubsMat.SeqMat(MatrixInfo.blosum80)
self.assertEqual(len(mat), 276)
self.checkMatrix(
mat,
"""\
A 5
B -2 4
C -1 -4 9
D -2 4 -4 6
E -1 1 -5 1 6
F -3 -4 -3 -4 -4 6
G 0 -1 -4 -2 -3 -4 6
H -2 -1 -4 -2 0 -2 -3 8
I -2 -4 -2 -4 -4 -1 -5 -4 5
K -1 -1 -4 -1 1 -4 -2 -1 -3 5
L -2 -4 -2 -5 -4 0 -4 -3 1 -3 4
M -1 -3 -2 -4 -2 0 -4 -2 1 -2 2 6
N -2 4 -3 1 -1 -4 -1 0 -4 0 -4 -3 6
P -1 -2 -4 -2 -2 -4 -3 -3 -4 -1 -3 -3 -3 8
Q -1 0 -4 -1 2 -4 -2 1 -3 1 -3 0 0 -2 6
R -2 -2 -4 -2 -1 -4 -3 0 -3 2 -3 -2 -1 -2 1 6
S 1 0 -2 -1 0 -3 -1 -1 -3 -1 -3 -2 0 -1 0 -1 5
T 0 -1 -1 -1 -1 -2 -2 -2 -1 -1 -2 -1 0 -2 -1 -1 1 5
V 0 -4 -1 -4 -3 -1 -4 -4 3 -3 1 1 -4 -3 -3 -3 -2 0 4
W -3 -5 -3 -6 -4 0 -4 -3 -3 -4 -2 -2 -4 -5 -3 -4 -4 -4 -3 11
X -1 -2 -3 -2 -1 -2 -2 -2 -2 -1 -2 -1 -1 -2 -1 -1 -1 -1 -1 -3 -1
Y -2 -3 -3 -4 -3 3 -4 2 -2 -3 -2 -2 -3 -4 -2 -3 -2 -2 -2 2 -2 7
Z -1 0 -4 1 4 -4 -3 0 -4 1 -3 -2 0 -2 3 0 0 -1 -3 -4 -1 -3 4
A B C D E F G H I K L M N P Q R S T V W X Y Z""",
)
mat = SubsMat.SeqMat(MatrixInfo.blosum85)
self.assertEqual(len(mat), 276)
self.checkMatrix(
mat,
"""\
A 5
B -2 4
C -1 -4 9
D -2 4 -5 7
E -1 0 -5 1 6
F -3 -4 -3 -4 -4 7
G 0 -1 -4 -2 -3 -4 6
H -2 -1 -5 -2 -1 -2 -3 8
I -2 -5 -2 -5 -4 -1 -5 -4 5
K -1 -1 -4 -1 0 -4 -2 -1 -3 6
L -2 -5 -2 -5 -4 0 -5 -3 1 -3 4
M -2 -4 -2 -4 -3 -1 -4 -3 1 -2 2 7
N -2 4 -4 1 -1 -4 -1 0 -4 0 -4 -3 7
P -1 -3 -4 -2 -2 -4 -3 -3 -4 -2 -4 -3 -3 8
Q -1 -1 -4 -1 2 -4 -3 1 -4 1 -3 0 0 -2 6
R -2 -2 -4 -2 -1 -4 -3 0 -4 2 -3 -2 -1 -2 1 6
S 1 0 -2 -1 -1 -3 -1 -1 -3 -1 -3 -2 0 -1 -1 -1 5
T 0 -1 -2 -2 -1 -3 -2 -2 -1 -1 -2 -1 0 -2 -1 -2 1 5
V -1 -4 -1 -4 -3 -1 -4 -4 3 -3 0 0 -4 -3 -3 -3 -2 0 5
W -3 -5 -4 -6 -4 0 -4 -3 -3 -5 -3 -2 -5 -5 -3 -4 -4 -4 -3 11
X -1 -2 -3 -2 -1 -2 -2 -2 -2 -1 -2 -1 -2 -2 -1 -2 -1 -1 -1 -3 -2
Y -3 -4 -3 -4 -4 3 -5 2 -2 -3 -2 -2 -3 -4 -2 -3 -2 -2 -2 2 -2 7
Z -1 0 -5 1 4 -4 -3 0 -4 1 -4 -2 -1 -2 4 0 -1 -1 -3 -4 -1 -3 4
A B C D E F G H I K L M N P Q R S T V W X Y Z""",
)
mat = SubsMat.SeqMat(MatrixInfo.blosum90)
self.assertEqual(len(mat), 276)
self.checkMatrix(
mat,
"""\
A 5
B -2 4
C -1 -4 9
D -3 4 -5 7
E -1 0 -6 1 6
F -3 -4 -3 -5 -5 7
G 0 -2 -4 -2 -3 -5 6
H -2 -1 -5 -2 -1 -2 -3 8
I -2 -5 -2 -5 -4 -1 -5 -4 5
K -1 -1 -4 -1 0 -4 -2 -1 -4 6
L -2 -5 -2 -5 -4 0 -5 -4 1 -3 5
M -2 -4 -2 -4 -3 -1 -4 -3 1 -2 2 7
N -2 4 -4 1 -1 -4 -1 0 -4 0 -4 -3 7
P -1 -3 -4 -3 -2 -4 -3 -3 -4 -2 -4 -3 -3 8
Q -1 -1 -4 -1 2 -4 -3 1 -4 1 -3 0 0 -2 7
R -2 -2 -5 -3 -1 -4 -3 0 -4 2 -3 -2 -1 -3 1 6
S 1 0 -2 -1 -1 -3 -1 -2 -3 -1 -3 -2 0 -2 -1 -1 5
T 0 -1 -2 -2 -1 -3 -3 -2 -1 -1 -2 -1 0 -2 -1 -2 1 6
V -1 -4 -2 -5 -3 -2 -5 -4 3 -3 0 0 -4 -3 -3 -3 -2 -1 5
W -4 -6 -4 -6 -5 0 -4 -3 -4 -5 -3 -2 -5 -5 -3 -4 -4 -4 -3 11
X -1 -2 -3 -2 -2 -2 -2 -2 -2 -1 -2 -1 -2 -2 -1 -2 -1 -1 -2 -3 -2
Y -3 -4 -4 -4 -4 3 -5 1 -2 -3 -2 -2 -3 -4 -3 -3 -3 -2 -3 2 -2 8
Z -1 0 -5 0 4 -4 -3 0 -4 1 -4 -2 -1 -2 4 0 -1 -1 -3 -4 -1 -3 4
A B C D E F G H I K L M N P Q R S T V W X Y Z""",
)
mat = SubsMat.SeqMat(MatrixInfo.blosum95)
self.assertEqual(len(mat), 276)
self.checkMatrix(
mat,
"""\
A 5
B -3 4
C -1 -4 9
D -3 4 -5 7
E -1 0 -6 1 6
F -3 -5 -3 -5 -5 7
G -1 -2 -5 -2 -3 -5 6
H -3 -1 -5 -2 -1 -2 -3 9
I -2 -5 -2 -5 -4 -1 -6 -4 5
K -1 -1 -5 -2 0 -4 -3 -1 -4 6
L -2 -5 -3 -5 -4 0 -5 -4 1 -3 5
M -2 -4 -3 -5 -3 -1 -4 -3 1 -2 2 7
N -2 4 -4 1 -1 -4 -1 0 -4 0 -5 -3 7
P -1 -3 -5 -3 -2 -5 -4 -3 -4 -2 -4 -3 -3 8
Q -1 -1 -4 -1 2 -4 -3 1 -4 1 -3 -1 0 -2 7
R -2 -2 -5 -3 -1 -4 -4 -1 -4 2 -3 -2 -1 -3 0 7
S 1 -1 -2 -1 -1 -3 -1 -2 -3 -1 -3 -3 0 -2 -1 -2 5
T 0 -1 -2 -2 -2 -3 -3 -2 -2 -1 -2 -1 -1 -2 -1 -2 1 6
V -1 -5 -2 -5 -3 -2 -5 -4 3 -3 0 0 -4 -4 -3 -4 -3 -1 5
W -4 -6 -4 -6 -5 0 -5 -3 -4 -5 -3 -2 -5 -5 -3 -4 -4 -4 -3 11
X -1 -2 -3 -2 -2 -2 -3 -2 -2 -1 -2 -2 -2 -3 -1 -2 -1 -1 -2 -4 -2
Y -3 -4 -4 -5 -4 3 -5 1 -2 -3 -2 -3 -3 -5 -3 -3 -3 -3 -3 2 -2 8
Z -1 0 -5 0 4 -4 -3 0 -4 0 -4 -2 -1 -2 4 -1 -1 -2 -3 -4 -1 -4 4
A B C D E F G H I K L M N P Q R S T V W X Y Z""",
)
mat = SubsMat.SeqMat(MatrixInfo.feng)
self.assertEqual(len(mat), 210)
self.checkMatrix(
mat,
"""\
A 6
C 2 6
D 4 1 6
E 4 0 5 6
F 2 3 1 0 6
G 5 3 4 4 1 6
H 2 2 3 2 2 1 | |
from sqlalchemy import Column, String, DECIMAL, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.mysql import LONGTEXT
all_tables = ['ASHAREBALANCESHEET', 'ASHARECAPITALIZATION', 'ASHARECASHFLOW', 'ASHAREDESCRIPTION', 'ASHAREDIVIDEND', 'ASHAREFINANCIALINDICATOR', 'ASHAREFREEFLOAT', 'ASHAREINCOME', 'ASHAREINDUSTRIESCODE', 'ASHAREPROFITEXPRESS', 'ASHARERIGHTISSUE', 'CFUNDWINDINDEXCOMPONENT', 'CHINAMUTUALFUNDASSETPORTFOLIO', 'CHINAMUTUALFUNDNAV', 'CHINAMUTUALFUNDREPNAVPER', 'CHINAMUTUALFUNDSEATTRADING', 'CMFINDUSTRYPLATE', 'CMFIOPVNAV', 'CMFNAVOPERATIONRECORD', 'CMFTHEMECONCEPT', 'COMPINTRODUCTION', 'INDEXCONTRASTSECTOR']
Base = declarative_base()
metadata = Base.metadata
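# Usage sketch (not part of the original module; the connection string and the
# query below are purely illustrative): the declarative models can be queried
# with a standard SQLAlchemy engine/session, e.g.
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#
#   engine = create_engine("mysql+pymysql://user:pwd@host/wind")  # hypothetical DSN
#   Session = sessionmaker(bind=engine)
#   session = Session()
#   rows = (session.query(ASHAREBALANCESHEET)
#                  .filter(ASHAREBALANCESHEET.REPORT_PERIOD == "20201231")
#                  .limit(10)
#                  .all())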
class ASHAREBALANCESHEET(Base):
__tablename__ = 'ASHAREBALANCESHEET'
ACC_EXP = Column(DECIMAL(20, 4), doc="预提费用")
ACCOUNTS_PAYABLE = Column(DECIMAL(20, 4), doc="应付票据及应付账款")
ACCOUNTS_RECEIVABLE = Column(DECIMAL(20, 4), doc="应收款项")
ACCOUNTS_RECEIVABLE_BILL = Column(DECIMAL(20, 4), doc="应收票据及应收账款")
ACCT_PAYABLE = Column(DECIMAL(20, 4), doc="应付账款")
ACCT_RCV = Column(DECIMAL(20, 4), doc="应收账款")
ACTING_TRADING_SEC = Column(DECIMAL(20, 4), doc="代理买卖证券款")
ACTING_UW_SEC = Column(DECIMAL(20, 4), doc="代理承销证券款")
ACTUAL_ANN_DT = Column(String(8), doc="实际公告日期")
ADV_FROM_CUST = Column(DECIMAL(20, 4), doc="预收款项")
AGENCY_BUS_ASSETS = Column(DECIMAL(20, 4), doc="代理业务资产")
AGENCY_BUS_LIAB = Column(DECIMAL(20, 4), doc="代理业务负债")
ANN_DT = Column(String(8), doc="公告日期")
ASSET_DEP_OTH_BANKS_FIN_INST = Column(DECIMAL(20, 4), doc="存放同业和其它金融机构款项")
BONDS_PAYABLE = Column(DECIMAL(20, 4), doc="应付债券")
BORROW_CENTRAL_BANK = Column(DECIMAL(20, 4), doc="向中央银行借款")
CAP_MRGN_PAID = Column(DECIMAL(20, 4), doc="存出资本保证金")
CAP_RSRV = Column(DECIMAL(20, 4), doc="资本公积金")
CAP_STK = Column(DECIMAL(20, 4), doc="股本")
CASH_DEPOSITS_CENTRAL_BANK = Column(DECIMAL(20, 4), doc="现金及存放中央银行款项")
CLAIMS_PAYABLE = Column(DECIMAL(20, 4), doc="应付赔付款")
CLIENTS_CAP_DEPOSIT = Column(DECIMAL(20, 4), doc="客户资金存款")
CLIENTS_RSRV_SETTLE = Column(DECIMAL(20, 4), doc="客户备付金")
CNVD_DIFF_FOREIGN_CURR_STAT = Column(DECIMAL(20, 4), doc="外币报表折算差额")
COMP_TYPE_CODE = Column(String(2), doc="公司类型代码")
CONST_IN_PROG = Column(DECIMAL(20, 4), doc="在建工程")
CONST_IN_PROG_TOT = Column(DECIMAL(20, 4), doc="在建工程(合计)(元)")
CONSUMPTIVE_BIO_ASSETS = Column(DECIMAL(20, 4), doc="消耗性生物资产")
CONTRACT_LIABILITIES = Column(DECIMAL(20, 4), doc="合同负债")
CONTRACTUAL_ASSETS = Column(DECIMAL(20, 4), doc="合同资产")
CRNCY_CODE = Column(String(10), doc="货币代码")
CUST_BANK_DEP = Column(DECIMAL(20, 4), doc="吸收存款")
DEBT_INVESTMENT = Column(DECIMAL(20, 4), doc="债权投资(元)")
DEFERRED_EXP = Column(DECIMAL(20, 4), doc="待摊费用")
DEFERRED_INC = Column(DECIMAL(20, 4), doc="递延收益")
DEFERRED_INC_NON_CUR_LIAB = Column(DECIMAL(20, 4), doc="递延收益-非流动负债")
DEFERRED_TAX_ASSETS = Column(DECIMAL(20, 4), doc="递延所得税资产")
DEFERRED_TAX_LIAB = Column(DECIMAL(20, 4), doc="递延所得税负债")
DEPOSIT_RECEIVED = Column(DECIMAL(20, 4), doc="存入保证金")
DEPOSIT_RECEIVED_IB_DEPOSITS = Column(DECIMAL(20, 4), doc="吸收存款及同业存放")
DERIVATIVE_FIN_ASSETS = Column(DECIMAL(20, 4), doc="衍生金融资产")
DERIVATIVE_FIN_LIAB = Column(DECIMAL(20, 4), doc="衍生金融负债")
DVD_PAYABLE = Column(DECIMAL(20, 4), doc="应付股利")
DVD_PAYABLE_INSURED = Column(DECIMAL(20, 4), doc="应付保单红利")
DVD_RCV = Column(DECIMAL(20, 4), doc="应收股利")
EMPL_BEN_PAYABLE = Column(DECIMAL(20, 4), doc="应付职工薪酬")
FIN_ASSETS_AVAIL_FOR_SALE = Column(DECIMAL(20, 4), doc="可供出售金融资产")
FIN_ASSETS_COST_SHARING = Column(DECIMAL(20, 4), doc="以摊余成本计量的金融资产")
FIN_ASSETS_FAIR_VALUE = Column(DECIMAL(20, 4), doc="以公允价值计量且其变动计入其他综合收益的金融资产")
FIX_ASSETS = Column(DECIMAL(20, 4), doc="固定资产")
FIX_ASSETS_DISP = Column(DECIMAL(20, 4), doc="固定资产清理")
FUND_SALES_FIN_ASSETS_RP = Column(DECIMAL(20, 4), doc="卖出回购金融资产款")
GOODWILL = Column(DECIMAL(20, 4), doc="商誉")
HANDLING_CHARGES_COMM_PAYABLE = Column(DECIMAL(20, 4), doc="应付手续费及佣金")
HELD_TO_MTY_INVEST = Column(DECIMAL(20, 4), doc="持有至到期投资")
HFS_ASSETS = Column(DECIMAL(20, 4), doc="持有待售的资产")
HFS_SALES = Column(DECIMAL(20, 4), doc="持有待售的负债")
INCL_PLEDGE_LOAN = Column(DECIMAL(20, 4), doc="其中:质押借款")
INCL_SEAT_FEES_EXCHANGE = Column(DECIMAL(20, 4), doc="其中:交易席位费")
INDEPENDENT_ACCT_ASSETS = Column(DECIMAL(20, 4), doc="独立账户资产")
INDEPENDENT_ACCT_LIAB = Column(DECIMAL(20, 4), doc="独立账户负债")
INSURED_DEPOSIT_INVEST = Column(DECIMAL(20, 4), doc="保户储金及投资款")
INSURED_PLEDGE_LOAN = Column(DECIMAL(20, 4), doc="保户质押贷款")
INT_PAYABLE = Column(DECIMAL(20, 4), doc="应付利息")
INT_RCV = Column(DECIMAL(20, 4), doc="应收利息")
INTANG_ASSETS = Column(DECIMAL(20, 4), doc="无形资产")
INVENTORIES = Column(DECIMAL(20, 4), doc="存货")
INVEST_REAL_ESTATE = Column(DECIMAL(20, 4), doc="投资性房地产")
LEASE_LIAB = Column(DECIMAL(20, 4), doc="租赁负债")
LENDING_FUNDS = Column(DECIMAL(20, 4), doc="融出资金")
LESS_TSY_STK = Column(DECIMAL(20, 4), doc="减:库存股")
LIAB_DEP_OTH_BANKS_FIN_INST = Column(DECIMAL(20, 4), doc="同业和其它金融机构存放款项")
LIFE_INSUR_RSRV = Column(DECIMAL(20, 4), doc="寿险责任准备金")
LOANS_AND_ADV_GRANTED = Column(DECIMAL(20, 4), doc="发放贷款及垫款")
LOANS_OTH_BANKS = Column(DECIMAL(20, 4), doc="拆入资金")
LOANS_TO_OTH_BANKS = Column(DECIMAL(20, 4), doc="拆出资金")
LONG_TERM_DEFERRED_EXP = Column(DECIMAL(20, 4), doc="长期待摊费用")
LONG_TERM_EQY_INVEST = Column(DECIMAL(20, 4), doc="长期股权投资")
LONG_TERM_REC = Column(DECIMAL(20, 4), doc="长期应收款")
LT_BORROW = Column(DECIMAL(20, 4), doc="长期借款")
LT_HEALTH_INSUR_V = Column(DECIMAL(20, 4), doc="长期健康险责任准备金")
LT_PAYABLE = Column(DECIMAL(20, 4), doc="长期应付款")
LT_PAYABLE_TOT = Column(DECIMAL(20, 4), doc="长期应付款(合计)(元)")
LT_PAYROLL_PAYABLE = Column(DECIMAL(20, 4), doc="长期应付职工薪酬")
MINORITY_INT = Column(DECIMAL(20, 4), doc="少数股东权益")
MONETARY_CAP = Column(DECIMAL(20, 4), doc="货币资金")
MRGN_PAID = Column(DECIMAL(20, 4), doc="存出保证金")
NON_CUR_ASSETS_DUE_WITHIN_1Y = Column(DECIMAL(20, 4), doc="一年内到期的非流动资产")
NON_CUR_LIAB_DUE_WITHIN_1Y = Column(DECIMAL(20, 4), doc="一年内到期的非流动负债")
NOTES_PAYABLE = Column(DECIMAL(20, 4), doc="应付票据")
NOTES_RCV = Column(DECIMAL(20, 4), doc="应收票据")
OBJECT_ID = Column(String(100), primary_key=True, doc="对象ID")
OIL_AND_NATURAL_GAS_ASSETS = Column(DECIMAL(20, 4), doc="油气资产")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
OTH_ASSETS = Column(DECIMAL(20, 4), doc="其他资产")
OTH_CUR_ASSETS = Column(DECIMAL(20, 4), doc="其他流动资产")
OTH_CUR_LIAB = Column(DECIMAL(20, 4), doc="其他流动负债")
OTH_LIAB = Column(DECIMAL(20, 4), doc="其他负债")
OTH_NON_CUR_ASSETS = Column(DECIMAL(20, 4), doc="其他非流动资产")
OTH_NON_CUR_LIAB = Column(DECIMAL(20, 4), doc="其他非流动负债")
OTH_PAYABLE = Column(DECIMAL(20, 4), doc="其他应付款")
OTH_PAYABLE_TOT = Column(DECIMAL(20, 4), doc="其他应付款(合计)(元)")
OTH_RCV = Column(DECIMAL(20, 4), doc="其他应收款")
OTH_RCV_TOT = Column(DECIMAL(20, 4), doc="其他应收款(合计)(元)")
OTHER_COMP_INCOME = Column(DECIMAL(20, 4), doc="其他综合收益")
OTHER_DEBT_INVESTMENT = Column(DECIMAL(20, 4), doc="其他债权投资(元)")
OTHER_EQUITY_INVESTMENT = Column(DECIMAL(20, 4), doc="其他权益工具投资(元)")
OTHER_EQUITY_TOOLS = Column(DECIMAL(20, 4), doc="其他权益工具")
OTHER_EQUITY_TOOLS_P_SHR = Column(DECIMAL(20, 4), doc="其他权益工具:优先股")
OTHER_ILLIQUIDFINANCIAL_ASSETS = Column(DECIMAL(20, 4), doc="其他非流动金融资产(元)")
OTHER_SUSTAINABLE_BOND = Column(DECIMAL(20, 4), doc="其他权益工具:永续债(元)")
OUT_LOSS_RSRV = Column(DECIMAL(20, 4), doc="未决赔款准备金")
PAYABLE_TO_REINSURER = Column(DECIMAL(20, 4), doc="应付分保账款")
PAYABLES = Column(DECIMAL(20, 4), doc="应付款项")
PRECIOUS_METALS = Column(DECIMAL(20, 4), doc="贵金属")
PREM_RCV = Column(DECIMAL(20, 4), doc="应收保费")
PREM_RECEIVED_ADV = Column(DECIMAL(20, 4), doc="预收保费")
PREPAY = Column(DECIMAL(20, 4), doc="预付款项")
PRODUCTIVE_BIO_ASSETS = Column(DECIMAL(20, 4), doc="生产性生物资产")
PROJ_MATL = Column(DECIMAL(20, 4), doc="工程物资")
PROV_NOM_RISKS = Column(DECIMAL(20, 4), doc="一般风险准备")
PROVISIONS = Column(DECIMAL(20, 4), doc="预计负债")
R_AND_D_COSTS = Column(DECIMAL(20, 4), doc="开发支出")
RCV_CEDED_CLAIM_RSRV = Column(DECIMAL(20, 4), doc="应收分保未决赔款准备金")
RCV_CEDED_LIFE_INSUR_RSRV = Column(DECIMAL(20, 4), doc="应收分保寿险责任准备金")
RCV_CEDED_LT_HEALTH_INSUR_RSRV = Column(DECIMAL(20, 4), doc="应收分保长期健康险责任准备金")
RCV_CEDED_UNEARNED_PREM_RSRV = Column(DECIMAL(20, 4), doc="应收分保未到期责任准备金")
RCV_FROM_CEDED_INSUR_CONT_RSRV = Column(DECIMAL(20, 4), doc="应收分保合同准备金")
RCV_FROM_REINSURER = Column(DECIMAL(20, 4), doc="应收分保账款")
RCV_INVEST = Column(DECIMAL(20, 4), doc="应收款项类投资")
RECEIVABLES_FINANCING = Column(DECIMAL(20, 4), doc="应收款项融资")
RED_MONETARY_CAP_FOR_SALE = Column(DECIMAL(20, 4), doc="买入返售金融资产")
REPORT_PERIOD = Column(String(8), doc="报告期")
RIGHT_USE_ASSETS = Column(DECIMAL(20, 4), doc="使用权资产")
RSRV_INSUR_CONT = Column(DECIMAL(20, 4), doc="保险合同准备金")
S_INFO_COMPCODE = Column(String(10), doc="公司ID")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
SETTLE_RSRV = Column(DECIMAL(20, 4), doc="结算备付金")
SPE_BAL_ASSETS_DIFF = Column(DECIMAL(20, 4), doc="资产差额(特殊报表科目)")
SPE_BAL_LIAB_DIFF = Column(DECIMAL(20, 4), doc="负债差额(特殊报表科目)")
SPE_BAL_LIAB_EQY_DIFF = Column(DECIMAL(20, 4), doc="负债及股东权益差额(特殊报表项目)")
SPE_BAL_SHRHLDR_EQY_DIFF = Column(DECIMAL(20, 4), doc="股东权益差额(特殊报表科目)")
SPE_CUR_ASSETS_DIFF = Column(DECIMAL(20, 4), doc="流动资产差额(特殊报表科目)")
SPE_CUR_LIAB_DIFF = Column(DECIMAL(20, 4), doc="流动负债差额(特殊报表科目)")
SPE_NON_CUR_ASSETS_DIFF = Column(DECIMAL(20, 4), doc="非流动资产差额(特殊报表科目)")
SPE_NON_CUR_LIAB_DIFF = Column(DECIMAL(20, 4), doc="非流动负债差额(特殊报表科目)")
SPECIAL_RSRV = Column(DECIMAL(20, 4), doc="专项储备")
SPECIFIC_ITEM_PAYABLE = Column(DECIMAL(20, 4), doc="专项应付款")
ST_BONDS_PAYABLE = Column(DECIMAL(20, 4), doc="应付短期债券")
ST_BORROW = Column(DECIMAL(20, 4), doc="短期借款")
ST_FINANCING_PAYABLE = Column(DECIMAL(20, 4), doc="应付短期融资款")
STATEMENT_TYPE = Column(String(10), doc="报表类型")
STM_BS_TOT = Column(DECIMAL(20, 4), doc="固定资产(合计)(元)")
SUBR_REC = Column(DECIMAL(20, 4), doc="应收代位追偿款")
SURPLUS_RSRV = Column(DECIMAL(20, 4), doc="盈余公积金")
TAXES_SURCHARGES_PAYABLE = Column(DECIMAL(20, 4), doc="应交税费")
TIME_DEPOSITS = Column(DECIMAL(20, 4), doc="定期存款")
TOT_ASSETS = Column(DECIMAL(20, 4), doc="资产总计")
TOT_BAL_ASSETS_DIFF = Column(DECIMAL(20, 4), doc="资产差额(合计平衡项目)")
TOT_BAL_LIAB_DIFF = Column(DECIMAL(20, 4), doc="负债差额(合计平衡项目)")
TOT_BAL_LIAB_EQY_DIFF = Column(DECIMAL(20, 4), doc="负债及股东权益差额(合计平衡项目)")
TOT_BAL_SHRHLDR_EQY_DIFF = Column(DECIMAL(20, 4), doc="股东权益差额(合计平衡项目)")
TOT_CUR_ASSETS = Column(DECIMAL(20, 4), doc="流动资产合计")
TOT_CUR_ASSETS_DIFF = Column(DECIMAL(20, 4), doc="流动资产差额(合计平衡项目)")
TOT_CUR_LIAB = Column(DECIMAL(20, 4), doc="流动负债合计")
TOT_CUR_LIAB_DIFF = Column(DECIMAL(20, 4), doc="流动负债差额(合计平衡项目)")
TOT_LIAB = Column(DECIMAL(20, 4), doc="负债合计")
TOT_LIAB_SHRHLDR_EQY = Column(DECIMAL(20, 4), doc="负债及股东权益总计")
TOT_NON_CUR_ASSETS = Column(DECIMAL(20, 4), doc="非流动资产合计")
TOT_NON_CUR_ASSETS_DIFF = Column(DECIMAL(20, 4), doc="非流动资产差额(合计平衡项目)")
TOT_NON_CUR_LIAB = Column(DECIMAL(20, 4), doc="非流动负债合计")
TOT_NON_CUR_LIAB_DIFF = Column(DECIMAL(20, 4), doc="非流动负债差额(合计平衡项目)")
TOT_SHR = Column(DECIMAL(20, 4), doc="期末总股本")
TOT_SHRHLDR_EQY_EXCL_MIN_INT = Column(DECIMAL(20, 4), doc="股东权益合计(不含少数股东权益)")
TOT_SHRHLDR_EQY_INCL_MIN_INT = Column(DECIMAL(20, 4), doc="股东权益合计(含少数股东权益)")
TRADABLE_FIN_ASSETS = Column(DECIMAL(20, 4), doc="交易性金融资产")
TRADABLE_FIN_LIAB = Column(DECIMAL(20, 4), doc="交易性金融负债")
UNCONFIRMED_INVEST_LOSS = Column(DECIMAL(20, 4), doc="未确认的投资损失")
UNDISTRIBUTED_PROFIT = Column(DECIMAL(20, 4), doc="未分配利润")
UNEARNED_PREM_RSRV = Column(DECIMAL(20, 4), doc="未到期责任准备金")
WIND_CODE = Column(String(40), doc="Wind代码")
class ASHARECAPITALIZATION(Base):
__tablename__ = 'ASHARECAPITALIZATION'
ANN_DT = Column(String(8), doc="公告日期")
CHANGE_DT = Column(String(8), doc="变动日期")
CHANGE_DT1 = Column(String(8), doc="变动日期1")
CUR_SIGN = Column(DECIMAL(1, 0), doc="最新标志")
FLOAT_A_SHR = Column(DECIMAL(20, 4), doc="流通A股(万股)")
FLOAT_B_SHR = Column(DECIMAL(20, 4), doc="流通B股(万股)")
FLOAT_H_SHR = Column(DECIMAL(20, 4), doc="流通H股(万股)")
FLOAT_OVERSEAS_SHR = Column(DECIMAL(20, 4), doc="境外流通股(万股)")
FLOAT_SHR = Column(DECIMAL(20, 4), doc="流通股(万股)")
IS_VALID = Column(DECIMAL(5, 0), doc="是否有效")
NON_TRADABLE_SHR = Column(DECIMAL(20, 4), doc="非流通股")
OBJECT_ID = Column(String(100), primary_key=True, doc="对象ID")
OPDATE = Column(DateTime)
OPMODE = Column(String(1))
OTHER_RESTRICTED_SHR = Column(DECIMAL(20, 4), doc="其他限售股")
RESTRICTED_A_SHR = Column(DECIMAL(20, 4), doc="限售A股(万股)")
RESTRICTED_B_SHR = Column(DECIMAL(20, 4), doc="限售B股(万股)")
S_INFO_WINDCODE = Column(String(40), doc="Wind代码")
S_SHARE_CHANGEREASON = Column(String(30), doc="股本变动原因")
S_SHARE_H = Column(DECIMAL(20, 4), doc="香港上市股")
S_SHARE_NONTRADABLE = Column(DECIMAL(20, 4), doc="股改前非流通股")
S_SHARE_NTRD_DOMESINITOR = Column(DECIMAL(20, 4), doc="非流通股(境内法人股:境内发起人股)")
S_SHARE_NTRD_FUNDBAL = Column(DECIMAL(20, 4), doc="非流通股(境内法人股:基金持股)")
S_SHARE_NTRD_GENJURIS = Column(DECIMAL(20, 4), doc="非流通股(境内法人股:一般法人股)")
S_SHARE_NTRD_INSDEREMP = Column(DECIMAL(20, 4), doc="内部职工股(万股)")
S_SHARE_NTRD_IPOINIP = Column(DECIMAL(20, 4), doc="非流通股(自然人股)")
S_SHARE_NTRD_IPOJURIS = Column(DECIMAL(20, 4), doc="非流通股(境内法人股:募集法人股)")
S_SHARE_NTRD_NET = Column(DECIMAL(20, 4), doc="NET股(万股)")
S_SHARE_NTRD_NONLSTFRGN = Column(DECIMAL(20, 4), doc="非流通股(非上市外资股)")
S_SHARE_NTRD_PRFSHARE = Column(DECIMAL(20, 4), doc="优先股(万股)")
S_SHARE_NTRD_SNORMNGER = Column(DECIMAL(20, 4), doc="流通股(高管持股)")
S_SHARE_NTRD_STAQ = Column(DECIMAL(20, 4), doc="STAQ股(万股)")
S_SHARE_NTRD_STATE = Column(DECIMAL(20, 4), doc="非流通股(国家股)")
S_SHARE_NTRD_STATE_PCT = Column(DECIMAL(20, 4), doc="非流通股(国有股)")
S_SHARE_NTRD_STATJUR = Column(DECIMAL(20, 4), doc="非流通股(国有法人股)")
S_SHARE_NTRD_STRTINVESTOR = Column(DECIMAL(20, 4), doc="非流通股(境内法人股:战略投资者持股)")
S_SHARE_NTRD_SUBDOMESJUR = Column(DECIMAL(20, 4), doc="非流通股(境内法人股)")
S_SHARE_NTRD_TRFNSHARE = Column(DECIMAL(20, 4), doc="转配股(万股)")
S_SHARE_OTCA = Column(DECIMAL(20, 4), doc="三板A股")
S_SHARE_OTCB = Column(DECIMAL(20, 4), doc="三板B股")
S_SHARE_RTD_DOMESJUR = Column(DECIMAL(20, 4), doc="限售A股(其他内资持股:境内法人持股)")
S_SHARE_RTD_DOMESNP = Column(DECIMAL(20, 4), doc="限售A股(其他内资持股:境内自然人持股)")
S_SHARE_RTD_FRGNJUR = Column(DECIMAL(20, 4), doc="限售A股(境外法人持股)")
S_SHARE_RTD_FRGNNP = Column(DECIMAL(20, 4), doc="限售A股(境外自然人持股)")
S_SHARE_RTD_INST = Column(DECIMAL(20, 4), doc="限售A股(其他内资持股:机构配售股)")
S_SHARE_RTD_SENMANAGER = Column(DECIMAL(20, 4), doc="限售股份(高管持股)(万股)")
S_SHARE_RTD_STATE = Column(DECIMAL(20, 4), doc="限售A股(国家持股)")
S_SHARE_RTD_STATEJUR = Column(DECIMAL(20, 4), doc="限售A股(国有法人持股)")
S_SHARE_RTD_SUBFRGN = Column(DECIMAL(20, 4), doc="限售A股(外资持股)")
S_SHARE_RTD_SUBOTHERDOMES = Column(DECIMAL(20, 4), doc="限售A股(其他内资持股)")
S_SHARE_TOTALA = Column(DECIMAL(20, 4), doc="A股合计")
S_SHARE_TOTALB = Column(DECIMAL(20, 4), doc="B股合计")
S_SHARE_TOTALOTC = Column(DECIMAL(20, 4), doc="三板合计")
S_SHARE_TOTALRESTRICTED = Column(DECIMAL(20, 4), doc="限售股合计")
S_SHARE_TOTALTRADABLE = Column(DECIMAL(20, 4), doc="流通股合计")
TOT_SHR = Column(DECIMAL(20, 4), doc="总股本(万股)")
WIND_CODE = Column(String(40), doc="Wind代码")
class ASHARECASHFLOW(Base):
__tablename__ = 'ASHARECASHFLOW'
ACTUAL_ANN_DT = Column(String(8), doc="实际公告日期")
AMORT_INTANG_ASSETS = Column(DECIMAL(20, 4), doc="无形资产摊销")
AMORT_LT_DEFERRED_EXP = Column(DECIMAL(20, 4), doc="长期待摊费用摊销")
ANN_DT = Column(String(8), doc="公告日期")
CASH_CASH_EQU_BEG_PERIOD = Column(DECIMAL(20, 4), doc="期初现金及现金等价物余额")
CASH_CASH_EQU_END_PERIOD = Column(DECIMAL(20, 4), doc="期末现金及现金等价物余额")
CASH_PAID_INVEST = Column(DECIMAL(20, 4), doc="投资支付的现金")
CASH_PAY_ACQ_CONST_FIOLTA = Column(DECIMAL(20, 4), doc="购建固定资产、无形资产和其他长期资产支付的现金")
CASH_PAY_BEH_EMPL = Column(DECIMAL(20, 4), doc="支付给职工以及为职工支付的现金")
CASH_PAY_CLAIMS_ORIG_INCO = Column(DECIMAL(20, 4), doc="支付原保险合同赔付款项的现金")
CASH_PAY_DIST_DPCP_INT_EXP = Column(DECIMAL(20, 4), doc="分配股利、利润或偿付利息支付的现金")
CASH_PAY_GOODS_PURCH_SERV_REC = Column(DECIMAL(20, | |
pass
# Exit a parse tree produced by SQLParser#cacheKeyList.
def exitCacheKeyList(self, ctx:SQLParser.CacheKeyListContext):
pass
# Enter a parse tree produced by SQLParser#keyUsageElement.
def enterKeyUsageElement(self, ctx:SQLParser.KeyUsageElementContext):
pass
# Exit a parse tree produced by SQLParser#keyUsageElement.
def exitKeyUsageElement(self, ctx:SQLParser.KeyUsageElementContext):
pass
# Enter a parse tree produced by SQLParser#keyUsageList.
def enterKeyUsageList(self, ctx:SQLParser.KeyUsageListContext):
pass
# Exit a parse tree produced by SQLParser#keyUsageList.
def exitKeyUsageList(self, ctx:SQLParser.KeyUsageListContext):
pass
# Enter a parse tree produced by SQLParser#flushOption.
def enterFlushOption(self, ctx:SQLParser.FlushOptionContext):
pass
# Exit a parse tree produced by SQLParser#flushOption.
def exitFlushOption(self, ctx:SQLParser.FlushOptionContext):
pass
# Enter a parse tree produced by SQLParser#logType.
def enterLogType(self, ctx:SQLParser.LogTypeContext):
pass
# Exit a parse tree produced by SQLParser#logType.
def exitLogType(self, ctx:SQLParser.LogTypeContext):
pass
# Enter a parse tree produced by SQLParser#flushTables.
def enterFlushTables(self, ctx:SQLParser.FlushTablesContext):
pass
# Exit a parse tree produced by SQLParser#flushTables.
def exitFlushTables(self, ctx:SQLParser.FlushTablesContext):
pass
# Enter a parse tree produced by SQLParser#flushTablesOptions.
def enterFlushTablesOptions(self, ctx:SQLParser.FlushTablesOptionsContext):
pass
# Exit a parse tree produced by SQLParser#flushTablesOptions.
def exitFlushTablesOptions(self, ctx:SQLParser.FlushTablesOptionsContext):
pass
# Enter a parse tree produced by SQLParser#preloadTail.
def enterPreloadTail(self, ctx:SQLParser.PreloadTailContext):
pass
# Exit a parse tree produced by SQLParser#preloadTail.
def exitPreloadTail(self, ctx:SQLParser.PreloadTailContext):
pass
# Enter a parse tree produced by SQLParser#preloadList.
def enterPreloadList(self, ctx:SQLParser.PreloadListContext):
pass
# Exit a parse tree produced by SQLParser#preloadList.
def exitPreloadList(self, ctx:SQLParser.PreloadListContext):
pass
# Enter a parse tree produced by SQLParser#preloadKeys.
def enterPreloadKeys(self, ctx:SQLParser.PreloadKeysContext):
pass
# Exit a parse tree produced by SQLParser#preloadKeys.
def exitPreloadKeys(self, ctx:SQLParser.PreloadKeysContext):
pass
# Enter a parse tree produced by SQLParser#adminPartition.
def enterAdminPartition(self, ctx:SQLParser.AdminPartitionContext):
pass
# Exit a parse tree produced by SQLParser#adminPartition.
def exitAdminPartition(self, ctx:SQLParser.AdminPartitionContext):
pass
# Enter a parse tree produced by SQLParser#resourceGroupManagement.
def enterResourceGroupManagement(self, ctx:SQLParser.ResourceGroupManagementContext):
pass
# Exit a parse tree produced by SQLParser#resourceGroupManagement.
def exitResourceGroupManagement(self, ctx:SQLParser.ResourceGroupManagementContext):
pass
# Enter a parse tree produced by SQLParser#createResourceGroup.
def enterCreateResourceGroup(self, ctx:SQLParser.CreateResourceGroupContext):
pass
# Exit a parse tree produced by SQLParser#createResourceGroup.
def exitCreateResourceGroup(self, ctx:SQLParser.CreateResourceGroupContext):
pass
# Enter a parse tree produced by SQLParser#resourceGroupVcpuList.
def enterResourceGroupVcpuList(self, ctx:SQLParser.ResourceGroupVcpuListContext):
pass
# Exit a parse tree produced by SQLParser#resourceGroupVcpuList.
def exitResourceGroupVcpuList(self, ctx:SQLParser.ResourceGroupVcpuListContext):
pass
# Enter a parse tree produced by SQLParser#vcpuNumOrRange.
def enterVcpuNumOrRange(self, ctx:SQLParser.VcpuNumOrRangeContext):
pass
# Exit a parse tree produced by SQLParser#vcpuNumOrRange.
def exitVcpuNumOrRange(self, ctx:SQLParser.VcpuNumOrRangeContext):
pass
# Enter a parse tree produced by SQLParser#resourceGroupPriority.
def enterResourceGroupPriority(self, ctx:SQLParser.ResourceGroupPriorityContext):
pass
# Exit a parse tree produced by SQLParser#resourceGroupPriority.
def exitResourceGroupPriority(self, ctx:SQLParser.ResourceGroupPriorityContext):
pass
# Enter a parse tree produced by SQLParser#resourceGroupEnableDisable.
def enterResourceGroupEnableDisable(self, ctx:SQLParser.ResourceGroupEnableDisableContext):
pass
# Exit a parse tree produced by SQLParser#resourceGroupEnableDisable.
def exitResourceGroupEnableDisable(self, ctx:SQLParser.ResourceGroupEnableDisableContext):
pass
# Enter a parse tree produced by SQLParser#alterResourceGroup.
def enterAlterResourceGroup(self, ctx:SQLParser.AlterResourceGroupContext):
pass
# Exit a parse tree produced by SQLParser#alterResourceGroup.
def exitAlterResourceGroup(self, ctx:SQLParser.AlterResourceGroupContext):
pass
# Enter a parse tree produced by SQLParser#setResourceGroup.
def enterSetResourceGroup(self, ctx:SQLParser.SetResourceGroupContext):
pass
# Exit a parse tree produced by SQLParser#setResourceGroup.
def exitSetResourceGroup(self, ctx:SQLParser.SetResourceGroupContext):
pass
# Enter a parse tree produced by SQLParser#threadIdList.
def enterThreadIdList(self, ctx:SQLParser.ThreadIdListContext):
pass
# Exit a parse tree produced by SQLParser#threadIdList.
def exitThreadIdList(self, ctx:SQLParser.ThreadIdListContext):
pass
# Enter a parse tree produced by SQLParser#dropResourceGroup.
def enterDropResourceGroup(self, ctx:SQLParser.DropResourceGroupContext):
pass
# Exit a parse tree produced by SQLParser#dropResourceGroup.
def exitDropResourceGroup(self, ctx:SQLParser.DropResourceGroupContext):
pass
# Enter a parse tree produced by SQLParser#utilityStatement.
def enterUtilityStatement(self, ctx:SQLParser.UtilityStatementContext):
pass
# Exit a parse tree produced by SQLParser#utilityStatement.
def exitUtilityStatement(self, ctx:SQLParser.UtilityStatementContext):
pass
# Enter a parse tree produced by SQLParser#describeStatement.
def enterDescribeStatement(self, ctx:SQLParser.DescribeStatementContext):
pass
# Exit a parse tree produced by SQLParser#describeStatement.
def exitDescribeStatement(self, ctx:SQLParser.DescribeStatementContext):
pass
# Enter a parse tree produced by SQLParser#explainStatement.
def enterExplainStatement(self, ctx:SQLParser.ExplainStatementContext):
pass
# Exit a parse tree produced by SQLParser#explainStatement.
def exitExplainStatement(self, ctx:SQLParser.ExplainStatementContext):
pass
# Enter a parse tree produced by SQLParser#explainableStatement.
def enterExplainableStatement(self, ctx:SQLParser.ExplainableStatementContext):
pass
# Exit a parse tree produced by SQLParser#explainableStatement.
def exitExplainableStatement(self, ctx:SQLParser.ExplainableStatementContext):
pass
# Enter a parse tree produced by SQLParser#helpCommand.
def enterHelpCommand(self, ctx:SQLParser.HelpCommandContext):
pass
# Exit a parse tree produced by SQLParser#helpCommand.
def exitHelpCommand(self, ctx:SQLParser.HelpCommandContext):
pass
# Enter a parse tree produced by SQLParser#useCommand.
def enterUseCommand(self, ctx:SQLParser.UseCommandContext):
pass
# Exit a parse tree produced by SQLParser#useCommand.
def exitUseCommand(self, ctx:SQLParser.UseCommandContext):
pass
# Enter a parse tree produced by SQLParser#restartServer.
def enterRestartServer(self, ctx:SQLParser.RestartServerContext):
pass
# Exit a parse tree produced by SQLParser#restartServer.
def exitRestartServer(self, ctx:SQLParser.RestartServerContext):
pass
# Enter a parse tree produced by SQLParser#exprOr.
def enterExprOr(self, ctx:SQLParser.ExprOrContext):
pass
# Exit a parse tree produced by SQLParser#exprOr.
def exitExprOr(self, ctx:SQLParser.ExprOrContext):
pass
# Enter a parse tree produced by SQLParser#exprNot.
def enterExprNot(self, ctx:SQLParser.ExprNotContext):
pass
# Exit a parse tree produced by SQLParser#exprNot.
def exitExprNot(self, ctx:SQLParser.ExprNotContext):
pass
# Enter a parse tree produced by SQLParser#exprIs.
def enterExprIs(self, ctx:SQLParser.ExprIsContext):
pass
# Exit a parse tree produced by SQLParser#exprIs.
def exitExprIs(self, ctx:SQLParser.ExprIsContext):
pass
# Enter a parse tree produced by SQLParser#exprAnd.
def enterExprAnd(self, ctx:SQLParser.ExprAndContext):
pass
# Exit a parse tree produced by SQLParser#exprAnd.
def exitExprAnd(self, ctx:SQLParser.ExprAndContext):
pass
# Enter a parse tree produced by SQLParser#exprXor.
def enterExprXor(self, ctx:SQLParser.ExprXorContext):
pass
# Exit a parse tree produced by SQLParser#exprXor.
def exitExprXor(self, ctx:SQLParser.ExprXorContext):
pass
# Enter a parse tree produced by SQLParser#primaryExprPredicate.
def enterPrimaryExprPredicate(self, ctx:SQLParser.PrimaryExprPredicateContext):
pass
# Exit a parse tree produced by SQLParser#primaryExprPredicate.
def exitPrimaryExprPredicate(self, ctx:SQLParser.PrimaryExprPredicateContext):
pass
# Enter a parse tree produced by SQLParser#primaryExprCompare.
def enterPrimaryExprCompare(self, ctx:SQLParser.PrimaryExprCompareContext):
pass
# Exit a parse tree produced by SQLParser#primaryExprCompare.
def exitPrimaryExprCompare(self, ctx:SQLParser.PrimaryExprCompareContext):
pass
# Enter a parse tree produced by SQLParser#primaryExprAllAny.
def enterPrimaryExprAllAny(self, ctx:SQLParser.PrimaryExprAllAnyContext):
pass
# Exit a parse tree produced by SQLParser#primaryExprAllAny.
def exitPrimaryExprAllAny(self, ctx:SQLParser.PrimaryExprAllAnyContext):
pass
# Enter a parse tree produced by SQLParser#primaryExprIsNull.
def enterPrimaryExprIsNull(self, ctx:SQLParser.PrimaryExprIsNullContext):
pass
# Exit a parse tree produced by SQLParser#primaryExprIsNull.
def exitPrimaryExprIsNull(self, ctx:SQLParser.PrimaryExprIsNullContext):
pass
# Enter a parse tree produced by SQLParser#compOp.
def enterCompOp(self, ctx:SQLParser.CompOpContext):
pass
# Exit a parse tree produced by SQLParser#compOp.
def exitCompOp(self, ctx:SQLParser.CompOpContext):
pass
# Enter a parse tree produced by SQLParser#predicate.
def enterPredicate(self, ctx:SQLParser.PredicateContext):
pass
# Exit a parse tree produced by SQLParser#predicate.
def exitPredicate(self, ctx:SQLParser.PredicateContext):
pass
# Enter a parse tree produced by SQLParser#predicateExprIn.
def enterPredicateExprIn(self, ctx:SQLParser.PredicateExprInContext):
pass
# Exit a parse tree produced by SQLParser#predicateExprIn.
def exitPredicateExprIn(self, ctx:SQLParser.PredicateExprInContext):
pass
# Enter a parse tree produced by SQLParser#predicateExprBetween.
def enterPredicateExprBetween(self, ctx:SQLParser.PredicateExprBetweenContext):
pass
# Exit a parse tree produced by SQLParser#predicateExprBetween.
def exitPredicateExprBetween(self, ctx:SQLParser.PredicateExprBetweenContext):
pass
# Enter a parse tree produced by SQLParser#predicateExprLike.
def enterPredicateExprLike(self, ctx:SQLParser.PredicateExprLikeContext):
pass
# Exit a parse tree produced by SQLParser#predicateExprLike.
def exitPredicateExprLike(self, ctx:SQLParser.PredicateExprLikeContext):
pass
# Enter a parse tree produced by SQLParser#predicateExprRegex.
def enterPredicateExprRegex(self, ctx:SQLParser.PredicateExprRegexContext):
pass
# Exit a parse tree produced by SQLParser#predicateExprRegex.
def exitPredicateExprRegex(self, ctx:SQLParser.PredicateExprRegexContext):
pass
# Enter a parse tree produced by SQLParser#bitExpr.
def enterBitExpr(self, ctx:SQLParser.BitExprContext):
pass
# Exit a parse tree produced by SQLParser#bitExpr.
def exitBitExpr(self, ctx:SQLParser.BitExprContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprConvert.
def enterSimpleExprConvert(self, ctx:SQLParser.SimpleExprConvertContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprConvert.
def exitSimpleExprConvert(self, ctx:SQLParser.SimpleExprConvertContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprVariable.
def enterSimpleExprVariable(self, ctx:SQLParser.SimpleExprVariableContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprVariable.
def exitSimpleExprVariable(self, ctx:SQLParser.SimpleExprVariableContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprCast.
def enterSimpleExprCast(self, ctx:SQLParser.SimpleExprCastContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprCast.
def exitSimpleExprCast(self, ctx:SQLParser.SimpleExprCastContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprUnary.
def enterSimpleExprUnary(self, ctx:SQLParser.SimpleExprUnaryContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprUnary.
def exitSimpleExprUnary(self, ctx:SQLParser.SimpleExprUnaryContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprOdbc.
def enterSimpleExprOdbc(self, ctx:SQLParser.SimpleExprOdbcContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprOdbc.
def exitSimpleExprOdbc(self, ctx:SQLParser.SimpleExprOdbcContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprRuntimeFunction.
def enterSimpleExprRuntimeFunction(self, ctx:SQLParser.SimpleExprRuntimeFunctionContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprRuntimeFunction.
def exitSimpleExprRuntimeFunction(self, ctx:SQLParser.SimpleExprRuntimeFunctionContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprFunction.
def enterSimpleExprFunction(self, ctx:SQLParser.SimpleExprFunctionContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprFunction.
def exitSimpleExprFunction(self, ctx:SQLParser.SimpleExprFunctionContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprCollate.
def enterSimpleExprCollate(self, ctx:SQLParser.SimpleExprCollateContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprCollate.
def exitSimpleExprCollate(self, ctx:SQLParser.SimpleExprCollateContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprMatch.
def enterSimpleExprMatch(self, ctx:SQLParser.SimpleExprMatchContext):
pass
# Exit a parse tree produced by SQLParser#simpleExprMatch.
def exitSimpleExprMatch(self, ctx:SQLParser.SimpleExprMatchContext):
pass
# Enter a parse tree produced by SQLParser#simpleExprWindowingFunction.
def enterSimpleExprWindowingFunction(self, ctx:SQLParser.SimpleExprWindowingFunctionContext):
pass
# Exit a parse tree produced by | |
#!/usr/bin/env python
# coding: utf-8
"""
Classifiers implemented:
* Decision tree: See http://en.wikipedia.org/wiki/Decision_tree_learning
* Naive Bayes: See http://en.wikipedia.org/wiki/Naive_Bayes_classifier
* K-Nearest Neighbor: See http://en.wikipedia.org/wiki/K-nearest_neighbor
"""
import numpy
from collections import defaultdict
from simpleai.machine_learning.models import Classifier
from simpleai.machine_learning.metrics import Counter, OnlineInformationGain, \
OnlineLogProbability
try:
import cPickle as pickle
except ImportError:
import pickle
class DecisionTreeLearner(Classifier):
"""
This implementation features an algorithm that *strictly* follows the
pseudocode given in AIMA.
    It's obviously inefficient in too many ways (perhaps incomplete too), but
    it's intended to be used pedagogically.
    See the other implementations in this same file for some discussion and
    issues solved.
This algorithm is equivalent to ID3.
"""
def __init__(self, dataset, problem):
self.dataset = dataset
self.problem = problem
self.root = self.learn(dataset, set(self.attributes), dataset)
def learn(self, examples, attributes, parent_examples):
"""
A decision tree learner that *strictly* follows the pseudocode given in
AIMA. In 3rd edition, see Figure 18.5, page 702.
"""
if not examples:
return self.plurality_value(parent_examples)
elif len(set(map(self.target, examples))) == 1:
return self.plurality_value(examples)
elif not attributes:
return self.plurality_value(examples)
A = max(attributes, key=lambda a: self.importance(a, examples))
tree = DecisionTreeNode(attribute=A)
for value in set(map(A, examples)):
exs = [e for e in examples if A(e) == value]
subtree = self.learn(exs, attributes - set([A]), examples)
tree.add_branch(value, subtree)
return tree
def plurality_value(self, examples):
if not examples:
raise ValueError("Dataset is empty")
counter = Counter(self.target)
for example in examples:
counter.add(example)
tree = DecisionTreeNode()
# Note that tie is *not* solved randomly here
tree.set_results_from_counts(counter)
return tree
def importance(self, attribute, examples):
"""
AIMA implies that importance should be information gain.
Since AIMA only defines it for binary features this implementation
was based on the wikipedia article:
http://en.wikipedia.org/wiki/Information_gain_in_decision_trees
"""
gain_counter = OnlineInformationGain(attribute, self.target)
for example in examples:
gain_counter.add(example)
return gain_counter.get_gain()
def classify(self, example):
node = walk_to_leaf(self.root, example)
return node.result
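# Usage sketch (illustrative only; assumes `dataset` is an iterable of examples
# and `problem` is a classification problem object exposing `target` and
# `attributes`, as in the rest of simpleai):
#
#   learner = DecisionTreeLearner(dataset, problem)
#   label, probability = learner.classify(some_example)
#   print(tree_to_str(learner.root))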
class NaiveBayes(Classifier):
"""
    Implements a classifier that uses Bayes' theorem.
"""
def learn(self):
# Frequency count of target classes
self.C = OnlineLogProbability()
# Frequency count of P(Fi|C):
self.Fi = defaultdict(lambda: # For each class,
defaultdict(lambda: # For each attribute,
OnlineLogProbability())) # For each value, count it
for example in self.dataset:
class_ = self.target(example)
self.C.add(class_)
for attribute in self.attributes:
value = attribute(example)
self.Fi[class_][attribute].add(value)
if not self.C:
raise ValueError("Dataset is empty")
        # Cripple defaultdict to a regular dict, so now it can raise KeyError
self.Fi.default_factory = None
for d in self.Fi.itervalues():
d.default_factory = None
def classify(self, example):
values = [(attribute, attribute(example))
for attribute in self.attributes]
hypotheses = []
for class_ in self.C:
try:
ps = [self.Fi[class_][attr][val] for attr, val in values]
except KeyError:
continue # A value not seen in training, so Prob(class) == 0
ps.append(self.C[class_])
hypotheses.append((sum(ps), class_))
if hypotheses:
logprob, best = max(hypotheses)
Z = numpy.logaddexp.reduce([p for p, class_ in hypotheses])
logprob = logprob - Z
else: # Something not at all seen in training, return best a priori
logprob, best = max((p, class_) for class_, p
in self.C.iteritems())
p = numpy.exp(logprob)
assert 0.0 <= p and p <= 1.0
return best, p
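# Note on the classifier above: `classify` returns a (class, probability) pair;
# the winning hypothesis is picked by summed log-probabilities and its posterior
# is normalised over the surviving hypotheses with numpy.logaddexp.reduce to
# avoid floating-point underflow.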
class KNearestNeighbors(Classifier):
"""
Classifies objects based on closest training example.
    Uses the k-nearest examples from the training set and
gets the most common classification among these.
To use this classifier the problem must define a `distance`
    method to measure the distance between two examples.
"""
def __init__(self, dataset, problem, k=1):
self.k = k
super(KNearestNeighbors, self).__init__(dataset, problem)
def learn(self):
try:
next(iter(self.dataset))
except StopIteration:
raise ValueError("Empty dataset")
try:
example = next(iter(self.dataset))
self.problem.distance(example, example)
except NotImplementedError:
message = "Classification problem not suitable for KNN. " \
"A problem with a distance defined is needed."
raise ValueError(message)
def classify(self, example):
distances = [(self.problem.distance(e, example), e)
for e in self.dataset]
best = sorted(distances)[:self.k]
counter = Counter(self.problem.target)
for _, example in best:
counter.add(example)
items = [(x[1], x[0]) for x in counter.iteritems()]
items.sort(reverse=True)
return (items[0][1], items[0][0] / counter.total)
def save(self, filepath):
"""
Saves the classifier to `filepath`.
Because this classifier needs to save the dataset, it must
be something that can be pickled and not something like an
iterator.
"""
if not filepath or not isinstance(filepath, basestring):
raise ValueError("Invalid filepath")
with open(filepath, "w") as filehandler:
pickle.dump(self, filehandler)
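# Usage sketch (illustrative; the problem must define `distance(a, b)` as noted
# in the docstring above):
#
#   knn = KNearestNeighbors(dataset, problem, k=3)
#   label, confidence = knn.classify(some_example)
#   knn.save("/tmp/knn.pickle")  # hypothetical path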
def path_to_leaf(node, example):
while node is not None:
yield node
node = node.take_branch(example)
def walk_to_leaf(node, example):
for node in path_to_leaf(node, example):
pass
return node
def iter_tree(root):
q = [(None, root, 0)]
while q:
value, node, depth = q.pop()
yield value, node, depth
for value, child in node.branches.iteritems():
q.append((value, child, depth + 1))
def tree_to_str(root):
"""
Returns a string representation of a decision tree with
root node `root`.
"""
xs = []
for value, node, depth in iter_tree(root):
template = "{indent}"
if node is not root:
template += "case={value}\t"
if node.attribute is None:
template += "result={result} -- P={prob:.2}"
else:
template += "split by {split}:\t" +\
"(partial result={result} -- P={prob:.2})"
line = template.format(indent=" " * depth,
value=value,
result=node.result[0],
prob=node.result[1],
split=str(node.attribute))
xs.append(line)
return "\n".join(xs)
class DecisionTreeNode(object):
"""
A node of a decision tree.
"""
def __init__(self, attribute=None):
self.branches = {}
self.attribute = attribute
self.parent = None
self.result = None
def take_branch(self, example):
"""
Returns a `DecisionTreeNode` instance that can better classify
        `example` based on the selector's value.
        If there are no more branches (i.e., this node is a leaf) or the
        attribute gives a value for a nonexistent branch then this method
        returns None.
"""
if self.attribute is None:
return None
value = self.attribute(example)
return self.branches.get(value, None)
def set_results_from_counts(self, counts):
self.counts = counts
total = sum(counts.itervalues())
majority = max(counts, key=counts.get) # Max frequency
self.result = (majority, counts[majority] / float(total))
def add_branch(self, value, branch=None):
assert not value in self.branches
if branch is None:
branch = self.__class__()
self.branches[value] = branch
branch.parent = self
return branch
class DecisionTreeLearner_Queued(Classifier):
"""
    This implementation has a few improvements over the one based on the book:
-It uses a queue instead of recursion, so the python stack limit is
never reached.
-In case an attribute has a value not seen in training the intermediate
nodes can give a "best so far" classification.
-Abusive re-iteration of the train examples is avoided by calculating
at the same time all information gains of a single node split.
This algorithm is equivalent to ID3.
"""
def learn(self):
if not self.attributes:
self.root = self._single_node_tree()
return
self.root = DecisionTreeNode()
q = [(self.root, self.dataset)]
while q:
node, examples = q.pop()
A = self._max_gain_split(examples)
counts = A.get_target_class_counts()
branches = A.get_branches()
# Base case exception
if node is self.root:
node.set_results_from_counts(counts)
if len(counts) == 1:
continue # Avoid splitting when there's a single target class
if len(branches) == 1:
continue # Avoid splitting when there's a single child branch
# Finally, go ahead and split
node.attribute = A.attribute
for value, counts in A.get_branches():
branch = node.add_branch(value)
branch.set_results_from_counts(counts)
bdataset = [e for e in examples if node.attribute(e) == value]
q.append((branch, bdataset))
def _max_gain_split(self, examples):
"""
Returns an OnlineInformationGain of the attribute with
max gain based on `examples`.
"""
gains = self._new_set_of_gain_counters()
for example in examples:
for gain in gains:
gain.add(example)
winner = max(gains, key=lambda gain: gain.get_gain())
if not winner.get_target_class_counts():
raise ValueError("Dataset is empty")
return winner
def _new_set_of_gain_counters(self):
"""
Creates a new set of OnlineInformationGain objects
for each attribute.
"""
return [OnlineInformationGain(attribute, self.target)
for attribute in self.attributes]
def _single_node_tree(self):
c = Counter(self.target)
for example in self.dataset:
c.add(example)
node = DecisionTreeNode()
node.set_results_from_counts(c)
return node
def classify(self, example):
node = walk_to_leaf(self.root, example)
return node.result
class DecisionTreeLearner_LargeData(DecisionTreeLearner_Queued):
"""
    This implementation is specifically designed to handle large datasets that
don't fit into memory and has more improvements over the queued one:
-Data is processed one-at-a-time, so the training data doesn't need to
fit in memory.
    -The number of times the train data is read is approximately log(N) full
iterations (from first to last) for a dataset with N examples.
This is because the gain from all splits from all leaf nodes are
        estimated simultaneously, so every time the train data is read
completely a full new level of the tree (ie, nodes with equal depth,
leaves) is expanded simultaneously.
This algorithm is equivalent to ID3.
    It is very important to note that in order to have a small memory footprint
the | |
ofilename = None
if os.path.exists(wdir):
old_wdir=advance_kwargs.pop('wdir')
cluster = load_cluster(
        ctype=cluster.ctype, units=cluster.units_init, origin=cluster.origin_init,
        orbit=orbit, filename=filename, load_function=load_function,
        wdir=wdir, ofilename=ofilename, **advance_kwargs
)
if cluster.ntot != 0.0:
# Add galpy orbit if given
if orbit != None:
cluster.orbit = orbit
if cluster.units == "pckms" or cluster.units == "kpckms":
t = (cluster.tphys / 1000.0) / conversion.time_in_Gyr(
ro=8.0, vo=220.0
)
elif cluster.units == "nbody":
t = (
cluster.tphys * cluster.tbar / 1000.0
) / conversion.time_in_Gyr(ro=8.0, vo=220.0)
elif cluster.units == "galpy":
t = cluster.tphys
cluster.add_orbit(
orbit.x(t),
orbit.y(t),
orbit.z(t),
orbit.vx(t),
orbit.vy(t),
orbit.vz(t),
)
return cluster
def _get_filename(filename,**kwargs):
"""assemble filename from **kwargs
Parameters
----------
filename : str or None
given filename to read in cluster data
Returns
-------
filename : str
Other Parameters
----------------
Same as load_cluster
History
-------
2021 - Written - Webb (UofT)
"""
nzfill = int(kwargs.get("nzfill", 1))
nsnap = int(kwargs.get("nsnap", "0"))
wdir = kwargs.get("wdir", "./")
snapdir = kwargs.get("snapdir", "snaps/")
snapbase = kwargs.get("snapbase", "")
snapend = kwargs.get("snapend", ".dat")
if filename != None:
if os.path.isfile(filename):
pass
elif os.path.isfile("%s%s%s" % (wdir, snapdir, filename)):
filename="%s%s%s" % (wdir, snapdir, filename)
elif os.path.isfile("%s%s" % (wdir, filename)):
filename="%s%s" % (wdir, filename)
else:
filename=None
elif os.path.isfile(
"%s%s%s%s%s" % (wdir, snapdir, snapbase, str(nsnap).zfill(nzfill), snapend)
):
filename = "%s%s%s%s%s" % (
wdir,
snapdir,
snapbase,
str(nsnap).zfill(nzfill),
snapend,
)
elif os.path.isfile(
"%s%s%s%s" % (wdir, snapbase, str(nsnap).zfill(nzfill), snapend)
):
filename = "%s%s%s%s" % (wdir, snapbase, str(nsnap).zfill(nzfill), snapend)
else:
filename = None
return filename
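# Resolution order implemented above (paths are illustrative): an explicit
# `filename` is tried as given, then under wdir+snapdir, then under wdir;
# otherwise the snapshot name is assembled as
#   wdir + snapdir + snapbase + str(nsnap).zfill(nzfill) + snapend
# (or the same without snapdir), e.g. "./snaps/cluster_00010.dat" for
# snapbase="cluster_", nsnap=10, nzfill=5, snapend=".dat".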
def _get_advanced_kwargs(cluster, **kwargs):
"""get **kwargs from current cluster before advancing
Parameters
----------
cluster - class
StarCluster to be advanced
Returns
-------
    advance_kwargs : dict
        keyword arguments used to advance the cluster to the next snapshot
    kwargs : dict
        remaining keyword arguments
Other Parameters
----------------
Same as load_cluster
History
-------
2018 - Written - Webb (UofT)
"""
nsnap = np.maximum(int(kwargs.pop("nsnap", 0)), cluster.nsnap) + 1
delimiter = kwargs.pop("delimiter", cluster.delimiter)
wdir = kwargs.pop("wdir", cluster.wdir)
nzfill = int(kwargs.pop("nzfill", cluster.nzfill))
snapbase = kwargs.pop("snapbase", cluster.snapbase)
snapend = kwargs.pop("snapend", cluster.snapend)
snapdir = kwargs.pop("snapdir", cluster.snapdir)
skiprows = kwargs.pop("skiprows", cluster.skiprows)
projected = kwargs.pop("projected", cluster.projected)
analyze = kwargs.pop("analyze", True)
sortstars = kwargs.pop("sortstars", True)
otime = kwargs.pop("otime", False)
give = kwargs.pop('give','mxv')
return {
"nsnap": nsnap,
"delimiter": delimiter,
"wdir": wdir,
"nzfill": nzfill,
"snapbase": snapbase,
"snapend": snapend,
"snapdir": snapdir,
"skiprows": skiprows,
"projected": projected,
"analyze": analyze,
"sortstars": sortstars,
"otime": otime,
"give" : give,
}, kwargs
def _get_cluster_orbit(cluster, ofile, advance=False, col_names=["t", "x", "y", "z", "vx", "vy", "vz"],col_nums=[0, 1, 2, 3, 4, 5, 6], **kwargs):
""" Read in cluster oribit from an ascii file and apply it to StarCluster
cluster - class
StarCluster to be advanced
ofile : file
an already opened file containing orbit information (default: None)
advance : bool
Is this a continuation from a previous timestep, in which case read next line (default: False)
col_names : str
names corresponding to time, position, and velocity
col_nums : int
column numbers corresponding to each column name
Returns
-------
    None
        the orbit is read from `ofile` and added to `cluster` in place
Other Parameters
----------------
nsnap : int
if nsnap is provided, read line # nsnap from the orbit file
ounits : str
if units are not the same as StarCluster units, provide them and they will be converted
otime : bool
use time in orbit file to set tphys (default:False)
Same as load_cluster
History
-------
2018
"""
nsnap = int(kwargs.get("nsnap", cluster.nsnap))
ounits = kwargs.get("ounits", None)
otime = kwargs.get("otime", False)
print('DEBUG:',nsnap,ounits,otime,advance)
# Read in orbital information from orbit
if nsnap != 0 and not advance:
for i in range(0, int(nsnap) + 1):
data = ofile.readline().split()
else:
data = ofile.readline().split()
#Testing
if True:
tphys,xgc,ygc,zgc,vxgc,vygc,vzgc=0.,0.,0.,0.,0.,0.,0.
for i in range(0,len(col_names)):
if col_names[i]=="t":
                tphys=float(data[col_nums[i]])
elif col_names[i]=="x":
xgc=float(data[col_nums[i]])
elif col_names[i]=="y":
ygc=float(data[col_nums[i]])
elif col_names[i]=="z":
zgc=float(data[col_nums[i]])
elif col_names[i]=="vx":
vxgc=float(data[col_nums[i]])
elif col_names[i]=="vy":
vygc=float(data[col_nums[i]])
elif col_names[i]=="vz":
vzgc=float(data[col_nums[i]])
else:
tphys = float(data[0])
xgc = float(data[1])
ygc = float(data[2])
zgc = float(data[3])
vxgc = float(data[4])
vygc = float(data[5])
vzgc = float(data[6])
if cluster.tphys == 0.0 or otime:
cluster.tphys = tphys
cluster.add_orbit(xgc, ygc, zgc, vxgc, vygc, vzgc, ounits)
return
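# Example orbit-file row for the default col_names/col_nums (values are
# illustrative): a line such as
#   0.0  8.0  0.0  0.0  0.0  220.0  0.0
# is read as tphys=0.0, (x, y, z) = (8, 0, 0) and (vx, vy, vz) = (0, 220, 0).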
# Get StarCluster from Gyrfalcon output
def _get_gyrfalcon(
filein, units="WDunits", origin="galaxy", ofile=None, advance=False, **kwargs
):
"""Extract a single snapshot from an ascii file output from a gyrfalcon simulation
Parameters
----------
filein : file
opened nemo/gyrfalcon file
units : str
units of data (default:'WDunits')
ofile : file
opened file containing orbital information
advance : bool
is this a snapshot that has been advanced to from initial load_cluster? (default: False)
kwargs
------
give : str
set what parameters are read in from nemo/gyrfalcon (default: 'mxv')
        Currently 'mxvpqael' and 'mxve' are also accepted.
Returns
-------
cluster : class
StarCluster
Other Parameters
----------------
Same as load_cluster
History
-------
2018 - Written - Webb (UofT)
"""
if units == "WDunits":
vcon = 220.0 / conversion.velocity_in_kpcGyr(220.0, 8.0)
mcon = 222288.4543021174
units = "kpckms"
units0 = "WDunits"
else:
vcon = 1.0
mcon = 1.0
units0 = units
# Default **kwargs
skiprows = kwargs.pop("skiprows", 13)
give = kwargs.get('give','mxv')
i_d = []
m = []
x = []
y = []
z = []
vx = []
vy = []
vz = []
if give == 'mxvpqael':
gyrpot=[]
gyrq=[]
gyracc=[]
gyreps=[]
gyrlev=[]
elif give =='mxve':
gyreps=[]
over_head = False
ntot = 0
tphys = 0.0
for j in range(0, skiprows):
data = filein.readline().split()
if len(data) == 0:
print("END OF FILE")
return StarCluster(0.0,ctype="nemo",**kwargs)
elif "#" not in data:
over_head = True
print("OVER HEAD")
break
if any("Ntot" in dat for dat in data):
sntot = data[2]
ntot = int(sntot[:-1])
if any("time" in dat for dat in data):
tphys = float(data[2])
cluster = StarCluster(
tphys,
units=units,
origin=origin,
ctype="nemo",
sfile=filein,
bfile=None,
skiprows=skiprows,
**kwargs
)
for j in range(ntot):
if over_head:
over_head = False
else:
data = filein.readline().split()
if "#" in data:
break
i_d.append(j + 1)
m.append(float(data[0]) * mcon)
x.append(float(data[1]))
y.append(float(data[2]))
z.append(float(data[3]))
vx.append(float(data[4]) * vcon)
vy.append(float(data[5]) * vcon)
vz.append(float(data[6]) * vcon)
if give == 'mxvpqael':
gyrpot.append(float(data[7]))
gyrq.append(float(data[8]))
gyracc.append(float(data[9]))
gyreps.append(float(data[10]))
gyrlev.append(float(data[11]))
elif give== 'mxve':
gyreps.append(float(data[7]))
if ntot > 0:
cluster.add_stars(x, y, z, vx, vy, vz, m, i_d,sortstars=False)
if ofile == None:
cluster.find_centre()
else:
_get_cluster_orbit(cluster, ofile, advance=advance, **kwargs)
if kwargs.get("analyze", True):
sortstars=kwargs.get("sortstars", True)
cluster.to_cluster(sortstars=False)
cluster.find_centre()
cluster.to_centre(sortstars=sortstars)
cluster.to_galaxy()
if give == 'mxvpqael':
cluster.gyrpot=np.array(gyrpot)
cluster.gyrq=np.array(gyrq)
cluster.gyracc=np.array(gyracc)
cluster.eps=np.array(gyreps)
cluster.gyrlev=np.array(gyrlev)
elif give== 'mxve':
cluster.eps=np.array(gyreps)
if units0=='WDunits': cluster.units_init='WDunits'
return cluster
def _get_nbody6(out3, out33=None, fort82=None, fort83=None, ofile=None, advance=False, **kwargs):
"""Extract a single snapshot from NBODY6 output
- Called for Nbody6 simulations with or without stellar evolution
Parameters
----------
out3 : file
opened OUT3 file
out33 : file
opened OUT33 file containing tail stars (default: None)
fort82 : file
opened fort.82 file containing BSE data (default: None)
fort83 : file
opened fort.83 file containing SSE data (default: None)
ofile : file
opened file containing orbital information
advance : bool
is this a snapshot that has been advanced to from initial load_cluster? (default: False)
Returns
-------
cluster : class
StarCluster
Other Parameters
----------------
Same as load_cluster
History
-------
2020 - Written - Webb (UofT)
"""
initialize = kwargs.get("initialize", False)
if out3 is not None:
ntot,alist,x,y,z,vx,vy,vz,m,i_d=_get_nbody6_out3(out3,**kwargs)
cluster = StarCluster(
alist[0],
units="nbody",
origin="cluster",
ctype="nbody6",
sfile=out3,
)
if ntot > 0:
cluster.add_nbody6(
alist[13], alist[12], alist[2], alist[4], alist[6], alist[7], alist[8], alist[3], alist[11], alist[17], ntot, alist[1], ntot+alist[1]
)
cluster.add_stars(x, y, z, vx, vy, vz, m, i_d)
if out33 is not None:
cluster.bfile=out33
ntot,alist,x,y,z,vx,vy,vz,m,i_d=_get_nbody6_out33(out33,**kwargs)
if ntot > 0:
cluster.add_stars(x, y, z, vx, vy, vz, m, i_d)
cluster.add_orbit(alist[0],alist[1],alist[2],alist[3],alist[4],alist[5])
if fort82 is not None and fort83 is not None:
cluster.ssefile=fort83
cluster.bsefile=fort82
i_d,kw,ri,m1,zl1,r1,te,i_d1,i_d2,kw1,kw2,kwb,rib,ecc,pb,semi,m1b,m2b,zl1b,zl2b,r1b,r2b,te1,te2=_get_nbody6se(fort82,fort83,**kwargs)
cluster.add_sse(kw,zl1,r1)
cluster.add_bse(i_d1,i_d2,kw1,kw2,kwb,ecc,pb,semi,m1b,m2b,zl1b,zl2b,r1b,r2b)
if kwargs.get("analyze", True) and cluster.ntot>0:
sortstars=kwargs.get("sortstars", True)
cluster.analyze(sortstars=sortstars)
    if ofile is not None:
_get_cluster_orbit(cluster, ofile, advance=advance, **kwargs)
return cluster
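# Illustrative sketch (not part of the original module): loading a single NBODY6 snapshot
# from a binary OUT3 file; the file name is a hypothetical placeholder and the optional
# OUT33/fort.82/fort.83 files are omitted.
def _example_load_nbody6_snapshot():
    out3 = open("OUT3", "rb")
    cluster = _get_nbody6(out3, out33=None, fort82=None, fort83=None)
    return cluster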
def _get_nbody6_out3(f,**kwargs):
#Read in header
try:
start_header_block_size = struct.unpack('i',f.read(4))[0]
    except Exception:
        # presumably the end of the file has been reached
        return 0,np.zeros(20),0,0,0,0,0,0,0,0
ntot = struct.unpack('i',f.read(4))[0]
model = struct.unpack('i',f.read(4))[0]
nrun = struct.unpack('i',f.read(4))[0]
nk = struct.unpack('i',f.read(4))[0]
end_header_block_size = struct.unpack('i',f.read(4))[0]
if start_header_block_size != end_header_block_size:
print('Error reading OUT3')
return -1
# Read in stellar data
start_data_block_size = struct.unpack('i',f.read(4))[0] #begin data block size
#Read in alist array from NBODY6
alist = []
for i in range(nk):
alist.append(struct.unpack('f',f.read(4))[0]) #Sverre's 'as'
#print(alist)
#Read in masses, positions, velocities, and id's
m=np.array([])
x,y,z=np.array([]),np.array([]),np.array([])
vx,vy,vz=np.array([]),np.array([]),np.array([])
i_d=np.array([])
for i in range(ntot):
m=np.append(m,struct.unpack('f',f.read(4))[0])
#print(m)
for i in range(ntot):
x=np.append(x,struct.unpack('f',f.read(4))[0])
y=np.append(y,struct.unpack('f',f.read(4))[0])
z=np.append(z,struct.unpack('f',f.read(4))[0])
for i in range(ntot):
vx=np.append(vx,struct.unpack('f',f.read(4))[0])
vy=np.append(vy,struct.unpack('f',f.read(4))[0])
vz=np.append(vz,struct.unpack('f',f.read(4))[0]) | |
<filename>nilearn/input_data/nifti_spheres_masker.py
"""
Transformer for computing seeds signals
----------------------------------------
Mask nifti images by spherical volumes for seed-region analyses
"""
import numpy as np
import warnings
from sklearn import neighbors
from joblib import Memory
from scipy import sparse
from ..image.resampling import coord_transform
from .._utils.niimg_conversions import _safe_get_data
from .._utils import CacheMixin, logger
from .._utils.niimg import img_data_dtype
from .._utils.niimg_conversions import check_niimg_4d, check_niimg_3d
from .._utils.class_inspect import get_params
from .. import image
from .. import masking
from .base_masker import filter_and_extract, BaseMasker
def _apply_mask_and_get_affinity(seeds, niimg, radius, allow_overlap,
mask_img=None):
    '''Utility function to get only the rows which are occupied by a sphere at
    the given seed locations with the provided radius. Rows are in target_affine
    and target_shape space.
Parameters
----------
seeds: List of triplets of coordinates in native space
Seed definitions. List of coordinates of the seeds in the same space
as target_affine.
niimg: 3D/4D Niimg-like object
See http://nilearn.github.io/manipulating_images/input_output.html
Images to process. It must boil down to a 4D image with scans
number as last dimension.
radius: float
Indicates, in millimeters, the radius for the sphere around the seed.
allow_overlap: boolean
If False, a ValueError is raised if VOIs overlap
    mask_img: Niimg-like object, optional
        Mask to apply to regions before extracting signals. If niimg is None,
        mask_img is used as a reference space in which the spheres' indices are
        placed.
Returns
-------
X: 2D numpy.ndarray
Signal for each brain voxel in the (masked) niimgs.
shape: (number of scans, number of voxels)
A: scipy.sparse.lil_matrix
Contains the boolean indices for each sphere.
shape: (number of seeds, number of voxels)
'''
seeds = list(seeds)
# Compute world coordinates of all in-mask voxels.
if niimg is None:
mask, affine = masking._load_mask_img(mask_img)
        # Get coordinates for all voxels inside the mask
mask_coords = np.asarray(np.nonzero(mask)).T.tolist()
X = None
elif mask_img is not None:
affine = niimg.affine
mask_img = check_niimg_3d(mask_img)
mask_img = image.resample_img(mask_img, target_affine=affine,
target_shape=niimg.shape[:3],
interpolation='nearest')
mask, _ = masking._load_mask_img(mask_img)
mask_coords = list(zip(*np.where(mask != 0)))
X = masking._apply_mask_fmri(niimg, mask_img)
elif niimg is not None:
affine = niimg.affine
if np.isnan(np.sum(_safe_get_data(niimg))):
            warnings.warn('The imgs you have fed into fit_transform() contain'
                          ' NaN values, which will be converted to zeroes.')
X = _safe_get_data(niimg, True).reshape([-1, niimg.shape[3]]).T
else:
X = _safe_get_data(niimg).reshape([-1, niimg.shape[3]]).T
mask_coords = list(np.ndindex(niimg.shape[:3]))
else:
raise ValueError("Either a niimg or a mask_img must be provided.")
# For each seed, get coordinates of nearest voxel
nearests = []
for sx, sy, sz in seeds:
nearest = np.round(coord_transform(sx, sy, sz, np.linalg.inv(affine)))
nearest = nearest.astype(int)
nearest = (nearest[0], nearest[1], nearest[2])
try:
nearests.append(mask_coords.index(nearest))
except ValueError:
nearests.append(None)
mask_coords = np.asarray(list(zip(*mask_coords)))
mask_coords = coord_transform(mask_coords[0], mask_coords[1],
mask_coords[2], affine)
mask_coords = np.asarray(mask_coords).T
clf = neighbors.NearestNeighbors(radius=radius)
A = clf.fit(mask_coords).radius_neighbors_graph(seeds)
A = A.tolil()
for i, nearest in enumerate(nearests):
if nearest is None:
continue
A[i, nearest] = True
# Include the voxel containing the seed itself if not masked
mask_coords = mask_coords.astype(int).tolist()
for i, seed in enumerate(seeds):
try:
A[i, mask_coords.index(list(map(int, seed)))] = True
except ValueError:
# seed is not in the mask
pass
sphere_sizes = np.asarray(A.tocsr().sum(axis=1)).ravel()
empty_spheres = np.nonzero(sphere_sizes == 0)[0]
if len(empty_spheres) != 0:
raise ValueError("These spheres are empty: {}".format(empty_spheres))
if not allow_overlap:
if np.any(A.sum(axis=0) >= 2):
raise ValueError('Overlap detected between spheres')
return X, A
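# Illustrative sketch (not part of nilearn): using the helper above directly. `func_img`
# stands for a hypothetical 4D Niimg-like object; the seed coordinates are arbitrary
# MNI-space examples.
def _example_apply_mask_and_get_affinity(func_img):
    seeds = [(0, -52, 18), (-46, -68, 32)]
    X, A = _apply_mask_and_get_affinity(seeds, func_img, 8., True)
    # X has shape (n_scans, n_voxels); A is a (n_seeds, n_voxels) sparse membership matrix.
    return X, A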
def _iter_signals_from_spheres(seeds, niimg, radius, allow_overlap,
mask_img=None):
"""Utility function to iterate over spheres.
Parameters
----------
seeds: List of triplets of coordinates in native space
Seed definitions. List of coordinates of the seeds in the same space
as the images (typically MNI or TAL).
    niimg: 3D/4D Niimg-like object
See http://nilearn.github.io/manipulating_images/input_output.html
Images to process. It must boil down to a 4D image with scans
number as last dimension.
radius: float
Indicates, in millimeters, the radius for the sphere around the seed.
allow_overlap: boolean
        If False, an error is raised if the spheres overlap (i.e. at least two
        spheres have a non-zero value for the same voxel).
mask_img: Niimg-like object, optional
See http://nilearn.github.io/manipulating_images/input_output.html
Mask to apply to regions before extracting signals.
"""
X, A = _apply_mask_and_get_affinity(seeds, niimg, radius,
allow_overlap,
mask_img=mask_img)
for i, row in enumerate(A.rows):
yield X[:, row]
class _ExtractionFunctor(object):
func_name = 'nifti_spheres_masker_extractor'
def __init__(self, seeds_, radius, mask_img, allow_overlap, dtype):
self.seeds_ = seeds_
self.radius = radius
self.mask_img = mask_img
self.allow_overlap = allow_overlap
self.dtype = dtype
def __call__(self, imgs):
n_seeds = len(self.seeds_)
imgs = check_niimg_4d(imgs, dtype=self.dtype)
signals = np.empty((imgs.shape[3], n_seeds), dtype=img_data_dtype(imgs))
for i, sphere in enumerate(_iter_signals_from_spheres(
self.seeds_, imgs, self.radius, self.allow_overlap,
mask_img=self.mask_img)):
signals[:, i] = np.mean(sphere, axis=1)
return signals, None
class NiftiSpheresMasker(BaseMasker, CacheMixin):
"""Class for masking of Niimg-like objects using seeds.
NiftiSpheresMasker is useful when data from given seeds should be
extracted. Use case: Summarize brain signals from seeds that were
obtained from prior knowledge.
Parameters
----------
    seeds: List of triplets of coordinates in native space
Seed definitions. List of coordinates of the seeds in the same space
as the images (typically MNI or TAL).
radius: float, optional
Indicates, in millimeters, the radius for the sphere around the seed.
Default is None (signal is extracted on a single voxel).
mask_img: Niimg-like object, optional
See http://nilearn.github.io/manipulating_images/input_output.html
Mask to apply to regions before extracting signals.
allow_overlap: boolean, optional
        If False, an error is raised if the spheres overlap (i.e. at least two
        spheres have a non-zero value for the same voxel). Default is False.
smoothing_fwhm: float, optional
If smoothing_fwhm is not None, it gives the full-width half maximum in
millimeters of the spatial smoothing to apply to the signal.
standardize: {'zscore', 'psc', True, False}, default is 'zscore'
Strategy to standardize the signal.
'zscore': the signal is z-scored. Timeseries are shifted
to zero mean and scaled to unit variance.
'psc': Timeseries are shifted to zero mean value and scaled
to percent signal change (as compared to original mean signal).
True : the signal is z-scored. Timeseries are shifted
to zero mean and scaled to unit variance.
False : Do not standardize the data.
detrend: boolean, optional
This parameter is passed to signal.clean. Please see the related
documentation for details.
low_pass: None or float, optional
This parameter is passed to signal.clean. Please see the related
documentation for details.
high_pass: None or float, optional
This parameter is passed to signal.clean. Please see the related
documentation for details.
t_r: float, optional
This parameter is passed to signal.clean. Please see the related
documentation for details.
dtype: {dtype, "auto"}
Data type toward which the data should be converted. If "auto", the
data will be converted to int32 if dtype is discrete and float32 if it
is continuous.
memory: joblib.Memory or str, optional
Used to cache the region extraction process.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
memory_level: int, optional
Aggressiveness of memory caching. The higher the number, the higher
the number of functions that will be cached. Zero means no caching.
verbose: integer, optional
Indicate the level of verbosity. By default, nothing is printed.
See also
--------
nilearn.input_data.NiftiMasker
"""
# memory and memory_level are used by CacheMixin.
def __init__(self, seeds, radius=None, mask_img=None, allow_overlap=False,
smoothing_fwhm=None, standardize=False, detrend=False,
low_pass=None, high_pass=None, t_r=None, dtype=None,
memory=Memory(location=None, verbose=0), memory_level=1,
verbose=0):
self.seeds = seeds
self.mask_img = mask_img
self.radius = radius
self.allow_overlap = allow_overlap
# Parameters for _smooth_array
self.smoothing_fwhm = smoothing_fwhm
# Parameters for clean()
self.standardize = standardize
self.detrend = detrend
self.low_pass = low_pass
self.high_pass = high_pass
self.t_r = t_r
self.dtype = dtype
# Parameters for joblib
self.memory = memory
self.memory_level = memory_level
self.verbose = verbose
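    # Illustrative sketch (not part of the original class): typical use of this masker with
    # hypothetical seed coordinates and a hypothetical functional image path.
    #
    #   masker = NiftiSpheresMasker(seeds=[(0, -52, 18), (-46, -68, 32)], radius=8.,
    #                               detrend=True, standardize='zscore', t_r=2.0)
    #   time_series = masker.fit_transform("functional_run.nii.gz")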
def fit(self, X=None, y=None):
"""Prepare signal extraction from regions.
All parameters are unused, they are for scikit-learn compatibility.
"""
if hasattr(self, 'seeds_'):
return self
error = ("Seeds must be a list of triplets of coordinates in "
"native space.\n")
if not hasattr(self.seeds, '__iter__'):
raise ValueError(error + "Given seed list is of type: " +
type(self.seeds))
self.seeds_ = []
# Check seeds and convert them to lists if needed
for i, seed in enumerate(self.seeds):
# Check the type first
if not hasattr(seed, '__len__'):
raise ValueError(error + "Seed #%i is not a valid triplet "
"of coordinates. It is of type %s."
% (i, type(seed)))
# Convert to list because it is easier to process
if isinstance(seed, np.ndarray):
seed = seed.tolist()
else:
# in case of | |
#!/usr/bin/env python
# Copyright 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Routines for making payments to miners
#
# Call flow for grin API V2 is the following:
# File send:
# Sender: init_send_tx
# Sender: tx_lock_outputs
# Recipient: receive_tx
# Sender: finalize_tx
# Sender: post_tx
#
# HTTP send:
# Sender: init_send_tx with InitTxSendArgs (the recipient receives it directly and finalization happens synchronously)
import sys
import os
import time
import json
import socket
import requests
import traceback
from datetime import datetime
from urllib.parse import urlparse
from grinlib import lib
from grinlib import grin
from grinlib import wallet
from grinbase.model.pool_utxo import Pool_utxo
from grinbase.model.pool_payment import Pool_payment
class PaymentError(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
def __str__(self):
return repr(self.message)
def validateAddress(address, address_type, logger):
    logger.warn("validateAddress with {} {}".format(address, address_type))
    if address_type in ["http", "https"]:
try:
logger.warn("Validating http wallet address: {}".format(address))
return urlparse(address).scheme in ['http', 'https']
except Exception as e:
logger.exception("Wallet http address is invalid: {}".format(str(e)))
return False
return True # XXX TODO Validate keybase address
def testWalletPort(address, logger):
try:
logger.warn("testWalletPort: {}".format(address))
s = socket.socket()
s.settimeout(2)
parsed = urlparse(address)
if parsed.scheme == "http":
addr = parsed.netloc
port = 80
else:
addr = parsed.netloc
port = 443
if ":" in parsed.netloc:
addr, port = parsed.netloc.split(':')
port = int(port)
logger.warn("Testing: {}, {}".format(addr, port))
s.connect((addr, port))
s.close()
except Exception as e:
logger.exception("Failed test connection: {}".format(str(e)))
return False
return True
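# Illustrative sketch (not part of the original pool code): checking a payout address with the
# two helpers above before attempting a payment. The address and logger are hypothetical.
def _example_check_payout_address(logger):
    address = "http://203.0.113.10:3415"
    if not validateAddress(address, "http", logger):
        return False
    return testWalletPort(address, logger)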
# Get the users balance then call the wallet owner API to
# generate a payment tx slate. Return that slate to the caller
def get_tx_slate(user_id, logger, database, method, invoked_by):
# 1) Create a Slate
slate = None
try:
locked_utxo = Pool_utxo.get_locked_by_userid(user_id)
if locked_utxo is None or locked_utxo.amount < (1 * 1000000000):
message = "Insufficient available balance for payout"
logger.warn(message)
raise PaymentError(400, message)
amount = locked_utxo.amount
# Generate a slate file
try:
args = {
'src_acct_name': None,
'amount': int(amount),
'minimum_confirmations': 10,
'max_outputs': 10,
'num_change_outputs': 1,
'selection_strategy_is_use_all': False,
'message': "pool payment: slate: user_id={}".format(user_id),
'target_slate_version': None,
'send_args': None,
}
logger.warn("Requesting Payment slate from payment request api: {}".format(args))
slate = wallet.init_send_tx(args)
except Exception as e:
logger.exception("Failed to get a payment slate: {}".format(str(e)))
raise PaymentError(500, str(e))
except PaymentError as e: # My own errors
raise
except Exception as e:
logger.exception("Failed to get a payment slate: {}".format(str(e)))
raise PaymentError(500, str(e))
# 2) Create a payment record
try:
timestamp = datetime.utcnow()
payment_record = Pool_payment(
user_id = locked_utxo.user_id,
timestamp = timestamp,
height = slate["height"],
address = slate["id"],
amount = amount,
method = method,
fee = slate["fee"],
failure_count = locked_utxo.failure_count,
state = "sent",
tx_data = json.dumps(slate),
invoked_by = invoked_by,
)
database.db.getSession().add(payment_record)
# Update the users utxo record
locked_utxo.amount = int(slate["fee"]) * -1
locked_utxo.last_try = timestamp
locked_utxo.total_amount += amount
database.db.getSession().commit()
except Exception as e:
logger.exception("Failed to create payment record: {}".format(str(e)))
raise PaymentError(500, str(e))
# 3) Lock the wallet outputs
try:
wallet.tx_lock_outputs(slate)
except Exception as e:
logger.exception("Failed to lock wallet outputs: {}".format(str(e)))
raise PaymentError(500, str(e))
# Return the slate
return slate
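# Illustrative sketch (not part of the original pool code): the file-send flow from the header
# comment, expressed with the functions in this module. The user_id, signed slate bytes,
# logger, and database objects are hypothetical; "slate" as a method label is also just for
# illustration.
def _example_file_send_flow(user_id, signed_slate_bytes, logger, database):
    # Pool side: build the slate and lock the wallet outputs.
    slate = get_tx_slate(user_id, logger, database, method="slate", invoked_by="example")
    # Miner side: the slate is signed offline (receive_tx) and returned as bytes.
    # Pool side: finalize and post the signed slate.
    submit_tx_slate(user_id, signed_slate_bytes, logger, database)
    return slate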
# Cancel / Expire a tx and refund users coins
def cancel_tx_slate(tx_slate_id, new_state, logger, database):
try:
logger.warn("In cancel_tx_slate")
# For tx sent via slate, tx_id is in the pool_payment.address field
payment_rec = Pool_payment.get_by_address(tx_slate_id)
if payment_rec is None:
message = "Could not find any payment record for tx_slate_id {}".format(tx_slate_id)
logger.warn(message)
raise PaymentError(400, message)
# Check if the wallet already has this marked as canceled
wallet_rec = wallet.retrieve_txs(tx_slate_id=tx_slate_id)
if len(wallet_rec) == 0:
logger.warn("Wallet has no record of tx_slate_id: {}".format(tx_slate_id))
else:
logger.warn("XXX: wallet_rec = {}".format(wallet_rec))
assert wallet_rec[0]["tx_slate_id"] == tx_slate_id, "Wallet returned incorrect tx data: {} vs {}".format(wallet_rec[0]["tx_slate_id"], tx_slate_id)
if wallet_rec[0]["tx_type"] == "TxSentCancelled":
logger.warn("Tx already marked canceled: {}".format(tx_slate_id))
else:
wallet.cancel_tx(tx_slate_id=tx_slate_id)
# Mark payment record state as expired or canceled
payment_rec.state = new_state
# Credit back the user utxo amount
locked_utxo = Pool_utxo.get_locked_by_userid(payment_rec.user_id)
locked_utxo.amount = locked_utxo.amount + payment_rec.amount + payment_rec.fee
locked_utxo.failure_count += payment_rec.failure_count + 1
database.db.getSession().commit()
except Exception as e:
logger.exception("Unexpected Error in cancel_tx_slate: {}".format(str(e)))
raise PaymentError(500, str(e))
# Submit a signed slate to the blockchain - finalize and post
def submit_tx_slate(user_id, slate, logger, database):
if slate is None:
message = "No slate data provided"
logger.warn(message)
raise PaymentError(400, message)
try:
slate_json = json.loads(slate.decode('utf-8'))
tx_id = slate_json["id"]
except Exception as e:
message = "Invalid slate data provided"
logger.warn(message)
raise PaymentError(400, message)
try:
logger.warn("Running submit_slate: tx_id = {}".format(tx_id))
# Record Keeping
timestamp = datetime.utcnow()
locked_utxo = Pool_utxo.get_locked_by_userid(user_id)
locked_utxo.last_success = timestamp
locked_utxo.failure_count = 0
payment_rec = Pool_payment.get_by_address(tx_id)
payment_rec.state = "posted"
finalized_slate = wallet.finalize_tx(slate_json)
payment_rec.tx_data = json.dumps(finalized_slate)
database.db.getSession().commit()
except Exception as e:
logger.exception("Unexpected Error in submit_tx_slate")
raise PaymentError(500, str(e))
# Post the TX
try:
wallet.post_tx(finalized_slate["tx"])
except Exception as e:
logger.exception("Failed to post payment: {}".format(repr(e)))
raise PaymentError(500, str(e))
# Called from atomic_send()
def http_send(user_id, address, amount, logger, num_change_outputs=1):
send_args = {
'method': 'http',
'dest': str(address),
'finalize': True,
'post_tx': False,
'fluff': False,
}
args = {
'src_acct_name': None,
'amount': int(amount),
'minimum_confirmations': 10,
'max_outputs': 10,
'num_change_outputs': int(num_change_outputs),
'selection_strategy_is_use_all': False,
'message': "pool payment: http: user_id={}".format(user_id),
'target_slate_version': None,
'send_args': send_args,
}
try:
finalized_slate = wallet.init_send_tx(args)
except Exception as e:
logger.error("HTTP send failed with error {}".format(str(e)))
if "is recipient listening" in str(e):
raise PaymentError(400, "Could not connect to remote wallet listener (is recipient listening?)")
else:
raise PaymentError(500, str(e))
logger.warn("Sent OK: user_id={} - address={} - amount {}".format(user_id, address, amount))
return finalized_slate
# Called from atomic_send()
def keybase_send(user_id, address, amount, logger):
send_args = {
'method': 'keybase',
'dest': str(address),
'finalize': True,
'post_tx': False,
'fluff': False,
}
args = {
'src_acct_name': None,
'amount': int(amount),
'minimum_confirmations': 10,
'max_outputs': 10,
'num_change_outputs': 1,
'selection_strategy_is_use_all': False,
'message': "pool payment: keybase: user_id={}".format(user_id),
'target_slate_version': None,
'send_args': send_args,
}
try:
finalized_slate = wallet.init_send_tx(args)
except Exception as e:
logger.error("Keybase send failed with error {}".format(str(e)))
raise PaymentError(500, str(e))
logger.warn("Sent OK: user_id={} - address={} - amount {}".format(user_id, address, amount))
return finalized_slate
# Atomic send used for http and keybase
def atomic_send(user_id, address, logger, database, method, invoked_by):
# validate method
if method not in ["http", "https", "keybase"]:
message = "Invalid payment method requested"
logger.warn(message)
raise PaymentError(400, message)
# Validate Address
    if address is None:
        message = "Wallet address is missing"
        logger.warn(message)
        raise PaymentError(400, message)
    address = address.strip()
if method == "http" or method == "https":
if not address.startswith("http"):
address = method + "://" + address
valid = validateAddress(address, method, logger)
        if not valid:
message = "Wallet address is invalid: {}".format(address)
logger.warn(message)
raise PaymentError(400, message)
if method == "http" or method == "https":
probe = testWalletPort(address, logger)
        if not probe:
            message = "Failed to establish connection with remote wallet listener at: {}".format(address)
            logger.warn(message)
raise PaymentError(400, message)
# Lock this utxo record for update and check for minimum balance
amount = 0
try:
locked_utxo = Pool_utxo.get_locked_by_userid(user_id)
if locked_utxo is None or locked_utxo.amount < (1 * 1000000000):
message = "Insufficient available balance for payout"
logger.warn(message)
raise PaymentError(400, message)
# Save the users current balance
amount = locked_utxo.amount
except PaymentError as e: # My own errors
raise
except Exception as e:
logger.exception("Failed to get worker balance: {}".format(str(e)))
raise PaymentError(500, str(e))
# Call the synchronous send method
# Subtract the balance from UTXO
slate = None
try:
timestamp = datetime.utcnow()
# Send the TX
if method == "http" or method == "https":
slate = http_send(user_id, address, amount, logger)
elif method == "keybase":
slate = keybase_send(user_id, address, amount, logger)
# Create a payment record
payment_rec = Pool_payment(
user_id = locked_utxo.user_id,
timestamp = timestamp,
height = slate["height"],
address = str(address),
amount = locked_utxo.amount,
method = method,
fee = int(slate["fee"]),
failure_count = locked_utxo.failure_count,
state = "posted",
tx_data = json.dumps(slate),
invoked_by = invoked_by,
)
database.db.getSession().add(payment_rec)
# Update the users utxo record
locked_utxo.amount = int(slate["fee"]) * -1
locked_utxo.last_try = timestamp
locked_utxo.last_success = timestamp
locked_utxo.total_amount += amount
# Commit this
database.db.getSession().commit()
except PaymentError as e: # My own errors
logger.exception("Failed to send tx".format(repr(e)))
if slate is | |
# rambutan.py
# Contact: <NAME>
# <EMAIL>
import os, numpy, pandas
try:
from sklearn.metrics import roc_auc_score
except ImportError:
	# fall back to a named metric string if sklearn is unavailable
	roc_auc_score = 'acc'
from joblib import Parallel, delayed
from .io import TrainingGenerator, ValidationGenerator
from .utils import bedgraph_to_dense, fasta_to_dense
from .utils import encode_dnase, extract_regions
def extract_sequence(filename, verbose=False):
"""Extract a nucleotide sequence from a file and encode it.
This function will read in a FastA formatted DNA file and convert it to be
a one-hot encoded numpy array for internal use. If a one-hot encoded file
is passed in, it is simply returned. This function is a convenient wrapper
for joblib to parallelize the unzipping portion.
Parameters
----------
filename : str or numpy.ndarray
The name of the fasta file to open or the one-hot encoded sequence.
verbose: bool, optional
Whether to report the status while extracting sequence. This does not
look good when done in parallel, so it is suggested it is set to false
in that case.
Returns
-------
sequence : numpy.ndarray, shape=(n, 4)
The one-hot encoded DNA sequence.
"""
if isinstance(filename, str):
if verbose:
print("Converting {}".format(filename))
return fasta_to_dense(filename, verbose)
return filename
def extract_dnase(filename, verbose=False):
"""Extract a DNaseI file and encode it.
This function will read in a bedgraph format file and convert it to the
one-hot encoded numpy array used internally. If a one-hot encoded file is
	passed in, it is simply returned. This function is a convenient wrapper for
joblib to parallelize the unzipping portion.
Parameters
----------
filename : str or numpy.ndarray
The name of the bedgraph file to open or the one-hot encoded sequence.
verbose: bool, optional
Whether to report the status while extracting sequence. This does not
look good when done in parallel, so it is suggested it is set to false
in that case.
Returns
-------
sequence : numpy.ndarray, shape=(n, 8)
The one-hot encoded DNaseI sequence.
"""
if isinstance(filename, str):
if verbose:
print("Converting {}".format(filename))
dnase_dense = bedgraph_to_dense(filename, verbose)
if verbose:
print("Encoding {}".format(filename))
dnase_ohe = encode_dnase(dnase_dense, verbose)
return dnase_ohe
return filename
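# Illustrative sketch (not part of the original module): the docstrings above note that these
# helpers are convenient to parallelize with joblib; this shows one way to do so for a
# hypothetical list of per-chromosome files.
def _example_parallel_extraction(fasta_files, bedgraph_files, n_jobs=4):
	sequences = Parallel(n_jobs=n_jobs)(
		delayed(extract_sequence)(f) for f in fasta_files)
	dnases = Parallel(n_jobs=n_jobs)(
		delayed(extract_dnase)(f) for f in bedgraph_files)
	return sequences, dnases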
class Rambutan(object):
"""Rambutan: a predictor of mid-range DNA-DNA contacts.
This serves as a wrapper for all functionality involving the use of Rambutan.
There are two main functions to use, fit and predict. Fit involves taking in
nucleotide sequence, DNaseI sensitivity, and a contact map, and training the
model. Predict involves taking in nucleotide sequence and DNaseI sensitivity
and predicting significant contacts.
	Note: Due to a limitation on mxnet's part, you cannot fit and predict in the
same program. You must fit the model and save the parameters during training,
and then load the pre-fit model and make predictions.
Parameters
----------
name : str, optional
The name of the model, necessary for saving or loading parameters.
Default is 'rambutan'.
iteration : int or None, optional
The iteration of training to load model parameters from, if using Rambutan
in predict mode. Default is None.
model : mxnet.symbol or None
An alternate neural network can be passed in if one wishes to train that
using the same framework instead of the original Rambutan model.
learning_rate : float, optional
The learning rate for the optimizer. Default is 0.01.
num_epoch : int, optional
The number of epochs to train the model for. Default is 25.
epoch_size : int, optional
The number of batches which comprise an 'epoch'. Default is 500.
wd : float, optional
The weight decay. This is equivalent to L2 regularization. Default is
0.0.
optimizer : str, optional
The optimizer to use for training. Default is 'adam'.
batch_size : int, optional
The number of samples to use in each batch. Default is 1024.
min_dist : int, optional
The minimum distance to consider contacts for. Default is 50kb.
max_dist : int, optional
The maximum distance to consider contacts for. Default is 1mb.
use_seq : bool, optional
Whether to use nucleotide sequence as an input to the model in the
training step. Default is True.
use_dnase : bool, optional
Whether to use DNaseI sensitivity as an input to the model in the
training step. Default is True.
use_dist : bool, optional
Whether to use genomic distance as an input to the model in the
training step. Default is True.
verbose : bool, optional
Whether to output information during training and prediction. Default
is True.
Example
-------
>>> from rambutan import Rambutan
>>> import numpy
>>> y_pred = Rambutan(iteration=25).predict('chr21.fa', 'chr21.GM12878.dnase.bedgraph', ctxs=[0, 1, 2, 3])
>>> numpy.save("chr21.predictions.npy", y_pred)
"""
def __init__(self, name='rambutan', iteration=None, model=None,
learning_rate=0.01, num_epoch=25, epoch_size=500, wd=0.0,
optimizer='adam', batch_size=1024, min_dist=50000, max_dist=1000000,
use_seq=True, use_dnase=True, use_dist=True, verbose=True):
self.name = name
self.iteration = iteration
self.model = model
self.learning_rate = learning_rate
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.wd = wd
self.optimizer = optimizer
self.batch_size = batch_size
self.min_dist = min_dist
self.max_dist = max_dist
self.use_seq = use_seq
self.use_dnase = use_dnase
self.use_dist = use_dist
self.verbose = verbose
def predict(self, sequence, dnase, regions=None, ctxs=[0], sparse=False):
"""Make predictions and return the matrix of probabilities.
Rambutan will make a prediction for each pair of genomic loci defined in
`regions' which fall between `min_dist' and `max_dist'. Inputs can either
be appropriately encoded sequence and dnase files, or fasta files and
bedgraph files for the nucleotide sequence and DNaseI sensitivity
respectively. Note: fasta files and bedgraph files must be made up of
a single chromosome, not one entry per chromosome.
Parameters
----------
sequence : numpy.ndarray, shape (n, 4) or str
The nucleotide sequence. Either a one hot encoded matrix of
nucleotides with n being the size of the chromosome, or a file
name for a fasta file.
dnase : numpy.ndarray, shape (n, 8) or str
The DNaseI fold change sensitivity. Either an encoded matrix in
the manner described in the manuscript or the file name of a
bedgraph file.
regions : numpy.ndarray or None, optional
The regions of interest to look at. All other regions will be
ignored. If set to none, the regions of interest are defined
to be 1kb bins for which all nucleotides are mappable, i.e.
where there are no n or N symbols in the fasta file. Default
is None.
ctxs : list, optional
The contexts of the gpus to use for prediction. Currently
prediction is only supported on gpus and not cpus due to
the time it would take for prediction. For example, if you
wanted to use three gpus of index 0 1 and 3 (because 2
is busy doing something else) you would just pass in
ctxs=[0, 1, 3] and the prediction task will be naturally
parallelized across your 3 gpus with a linear speedup.
sparse : bool, optional
Whether to return three arrays, the rows, columns, and values,
or the full dense matrix. Sparse is useful for large matrices.
Returns
-------
y : numpy.ndarray, shape=(m, m)
A matrix of predictions of shape (m, m) where m is the number of
1kb loci in the chromosome. The predictions will reside in the
upper triangle of the matrix since predictions are symmetric.
"""
if isinstance(sequence, str) and isinstance(dnase, str):
if self.verbose:
print("Converting FASTA")
sequence = fasta_to_dense(sequence, self.verbose)
if self.verbose:
print("Converting DNase")
dnase = bedgraph_to_dense(dnase, self.verbose)
if self.verbose:
print("Encoding DNase")
dnase = encode_dnase(dnase, self.verbose)
if regions is None:
regions = extract_regions(sequence)
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
from .models import predict_task
Parallel(n_jobs=len(ctxs))( delayed(predict_task)(self.name,
self.iteration, ctx, len(ctxs), sequence, dnase, regions,
self.use_seq, self.use_dnase, self.use_dist, self.min_dist,
self.max_dist, self.batch_size, self.verbose) for ctx in ctxs)
		if not sparse:
			n = int(regions.max()) // 1000 + 1
			y = numpy.zeros((n, n))
for ctx in ctxs:
with open('.rambutan.predictions.{}.txt'.format(ctx), 'r') as infile:
for line in infile:
mid1, mid2, p = line.split()
						mid1 = (int(float(mid1)) - 500) // 1000
						mid2 = (int(float(mid2)) - 500) // 1000
p = float(p)
y[mid1, mid2] = p
os.system('rm .rambutan.predictions.{}.txt'.format(ctx))
return y
else:
rows, cols, values = [], [], []
for ctx in ctxs:
with open('.rambutan.predictions.{}.txt'.format(ctx), 'r') as infile:
for line in infile:
mid1, mid2, p = line.split()
mid1, mid2, p = int(mid1), int(mid2), float(p)
rows.append(mid1)
cols.append(mid2)
values.append(p)
os.system('rm .rambutan.predictions.{}.txt'.format(ctx))
rows = numpy.array(rows)
cols = numpy.array(cols)
values = numpy.array(values)
return rows, cols, values
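	# Illustrative sketch (not part of the original class): requesting sparse output from
	# predict(); the file names and GPU contexts below are hypothetical.
	#
	#   model = Rambutan(iteration=25)
	#   rows, cols, values = model.predict('chr21.fa', 'chr21.GM12878.dnase.bedgraph',
	#                                      ctxs=[0, 1], sparse=True)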
def fit(self, sequence, dnase, contacts, regions=None, validation_contacts=None,
training_chromosome=None, validation_chromosome=None, ctxs=[0],
eval_metric=roc_auc_score, symbol=None, n_jobs=1):
"""Fit the model to sequence, DNaseI, and Hi-C data.
You can fit the Rambutan model to new data. One must pass in sequence
data, DNaseI data, and Hi-C contact maps. The sequence data can come
either in the form of FastA files or one-hot encoded numpy arrays. The
DNaseI data can likewise come as either bedgraph files or numpy arrays.
The Hi-C data must come in the traditional 7 column format. Validation
data can optionally be passed in to report a validation set error during
the training process. NOTE: Regardless of if they are used or not, all
chromosomes should be passed in to the `sequence` and `dnase` parameters.
The contacts specified in `contacts` will dictate which are used. This is
to make the internals easier.
Parameters for training such as the number of epochs and batches are
set in the initial constructor, following with the sklearn format for
estimators.
Parameters
----------
sequence : numpy.ndarray, shape (n, 4) or str
The nucleotide sequence. Either a one hot encoded matrix of
nucleotides with n being the size of the chromosome, or a file
name for a fasta file.
dnase : numpy.ndarray, shape (n, 8) or str
The DNaseI fold change sensitivity. Either an encoded matrix in
the manner described in | |
"cli:handler")
):
# No population for target language nodes.
pass
elif ctx.xml.is_node_with_attr(_xml_sub_node, "cli:menu", "@name") and ctx.xml.is_node(xml_node, "cli:cli"):
# Do not populate menus which are defined at the cli level.
pass
elif ctx.xml.is_node_with_attr(_xml_sub_node, "cli:tag", "@ref"):
# No population for tag[@ref] right now.
# Because tag[@id] could be processed later, tag[@ref] will be processed at the very end,
# in the main populate() routine above.
pass
elif ctx.xml.is_node(_xml_sub_node, "cli:*"):
ctx.Menu.Populate.create_node(ctx, xml_menu, _xml_sub_node, indent_count=indent_count)
if ctx.xml.is_node(_xml_sub_node, "cli:cli"):
# Should never occur.
pass
elif ctx.xml.is_node_with_attr(_xml_sub_node, "cli:menu", "@name"):
# Do not populate menus, this is done directly at the cli level.
pass
else:
ctx.Menu.Populate.walk(ctx, xml_menu, _xml_sub_node, indent_count=indent_count+1)
@staticmethod
def create_node(ctx, xml_menu, xml_node, indent_count=0, with_creation_params=True):
""" Create the node for population.
@param ctx (Cli2Cpp) Execution context.
@param xml_menu (XML node) Current menu.
@param xml_node (XML node) Current node.
@param indent_count (int) Indentation depth.
@param with_creation_params (bool) Insert creation parameters or not (default is True). """
_class_name = ctx.Utils.node2class(ctx, xml_node)
_var_name = ctx.Utils.node2var(ctx, xml_node)
_parent_var = ctx.Utils.node2var(ctx, ctx.xml.parent_node(xml_node))
ctx.Utils.indent(ctx, xml_menu, indent_count)
_do_not_cast = ctx.xml.is_node_with_attr(xml_node, "cli:tag", "@ref") or ctx.xml.is_node(xml_node, "cli:menu")
if not _do_not_cast:
ctx.out.put("%s = dynamic_cast<%s*>(& " % (_var_name, _class_name))
# Attach the new object to the parent node.
ctx.out.put("%s" % _parent_var)
if ctx.xml.is_node(xml_node, "cli:menu"):
# Populate menu[@ref] item.
ctx.out.put("->SetMenuRef(new cli::MenuRef(")
_menu_ref_set = False
if ctx.xml.attr_value(xml_node, "@name") is not None:
ctx.out.put("*%s" % _var_name)
_menu_ref_set = True
elif ctx.xml.attr_value(xml_node, "@ref") is not None:
_xml_menu = ctx.cache.node(xml_node).cli_Cli2xxx_menu
if _xml_menu is not None:
ctx.out.put("*%s" % ctx.Utils.node2var(ctx, _xml_menu))
_menu_ref_set = True
if not _menu_ref_set:
ctx.Utils.abort(ctx, xml_node, "missing menu/@name or menu/@ref attribute, or invalid menu/@ref reference")
ctx.out.put("))")
else:
# Populate other kind of item.
ctx.out.put("->AddElement(new %s(" % _class_name)
if with_creation_params:
ctx.Menu.Constructor.params(ctx, xml_node)
ctx.out.put("))")
if not _do_not_cast:
ctx.out.put(")")
# Terminate the line.
ctx.out.put(";").endl()
class Execute(object):
""" Menu.Execution code generation routines. """
def __init__(self):
""" Constructor. """
# Static class, nothing to be done
pass
@staticmethod
def execute(ctx, xml_menu):
""" Menu execution code generation.
@param ctx (Cli2Cpp) Execution context.
@param xml_menu (XML node) cli:cli or cli:menu node. """
ctx.Utils.indent(ctx, xml_menu, 1).put("// ----- Menu execution -----").endl()
# Execution method
ctx.Utils.indent(ctx, xml_menu, 1).put("public: virtual const bool Execute(const cli::CommandLine& CLI_CmdLine) const {").endl()
ctx.Utils.indent(ctx, xml_menu, 2).put("{").endl()
# Trace
ctx.Utils.indent(ctx, xml_menu, 3).put("static const cli::TraceClass CLI_EXECUTION(\"CLI_EXECUTION\", cli::Help()")
ctx.out.put(".AddHelp(cli::Help::LANG_EN, \"CLI Execution traces\")")
ctx.out.put(u".AddHelp(cli::Help::LANG_FR, \"Traces d'exécution du CLI\")")
ctx.out.put(");").endl()
# Step variables
ctx.Utils.indent(ctx, xml_menu, 3).put("cli::CommandLineIterator cli_Elements(CLI_CmdLine);").endl()
# Call implementation on the menu object
ctx.Menu.Execute.execute_node(ctx, xml_menu, xml_menu, indent_count=3)
# Finishing
ctx.Utils.indent(ctx, xml_menu, 2).put("}").endl()
ctx.Utils.indent(ctx, xml_menu, 2).put("return false;").endl()
ctx.Utils.indent(ctx, xml_menu, 1).put("}").endl()
ctx.out.endl()
@staticmethod
def execute_node(ctx, xml_menu, xml_node, indent_count):
""" Recursive execution code generation routine.
@param ctx (Cli2Cpp) Execution context.
@param xml_menu (XML node) cli:cli or cli:menu node.
@param xml_node (XML node) Current node which execution code is generated for.
@param indent_count (int) Number of indentation from the menu offset. """
# Top comment
ctx.Utils.indent(ctx, xml_menu, indent_count + 0).put("// " + ctx.Utils.node2desc(ctx, xml_node)).endl()
# Top label
if ctx.xml.is_node_with_attr_value(xml_node, "cli:tag", "@hollow", "yes"):
# cli:tag[@hollow='yes']: direct jump to end label
ctx.Utils.indent(ctx, xml_menu, indent_count + 0).put("goto %s;" % ctx.Utils.node2endlbl(ctx, xml_node)).endl()
ctx.Utils.indent(ctx, xml_menu, indent_count + 0).put("%s: ;" % ctx.Utils.node2toplbl(ctx, xml_node)).endl()
# Start the block
if ctx.xml.is_node(xml_node, "cli:cli") or ctx.xml.is_node(xml_node, "cli:menu") or ctx.xml.is_node_with_attr(xml_node, "cli:tag", "@ref"):
ctx.Utils.indent(ctx, xml_menu, indent_count + 0).put("{").endl()
elif ctx.xml.is_node_with_attr(xml_node, "cli:tag", "@id"):
ctx.Utils.indent(ctx, xml_menu, indent_count + 0).put("do {").endl()
else:
ctx.Utils.indent(ctx, xml_menu, indent_count + 0).put("if (cli_Elements == *%s) {" % ctx.Utils.node2var(ctx, xml_node)).endl()
# Step it
if (not ctx.xml.is_node(xml_node, "cli:tag")) and (not ctx.xml.is_node(xml_node, "cli:endl")):
ctx.Utils.indent(ctx, xml_menu, indent_count + 1).put("if (! cli_Elements.StepIt()) return false;").endl()
# Trace current keyword
ctx.Utils.indent(ctx, xml_menu, indent_count + 1).put("cli::GetTraces().Trace(CLI_EXECUTION)")
ctx.out.put(" << \"context = \\\"%s\\\", \"" % ctx.Utils.node2desc(ctx, xml_node))
ctx.out.put(" << \"word = \" << (")
ctx.out.put( "dynamic_cast<const cli::Endl*>(*cli_Elements) ") # pylint: disable=bad-whitespace
ctx.out.put( "? \"<CR>\" ") # pylint: disable=bad-whitespace
ctx.out.put( ": (const char*) (*cli_Elements)->GetKeyword()") # pylint: disable=bad-whitespace
ctx.out.put(") << cli::endl;").endl()
# Execution
_xml_sources = []
for _xml_source in ctx.xml.children(xml_node):
if ctx.xml.is_node(_xml_source, "cli:cpp"):
if ctx.xml.attr_value(_xml_source, "@option") is None:
_xml_sources.append(_xml_source)
if len(_xml_sources) > 0:
ctx.out.put(ctx.args.user_indent()).endl()
for _xml_source in _xml_sources:
ctx.Utils.indent(ctx, xml_menu, indent_count + 1, with_user_indent=True)
ctx.Utils.source_node(ctx, _xml_source)
ctx.out.endl()
ctx.out.put(ctx.args.user_indent()).endl()
# Sub-elements
ctx.Menu.Execute.walk(ctx, xml_menu, xml_node, indent_count=indent_count+1)
# Final jump
if ctx.xml.is_node_with_attr(xml_node, "cli:tag", "@id"):
ctx.Utils.indent(ctx, xml_menu, indent_count + 1).put("goto %s;" % ctx.Utils.node2endlbl(ctx, xml_node)).endl()
elif ctx.xml.is_node_with_attr(xml_node, "cli:tag", "@ref"):
pass
elif ctx.xml.is_node(xml_node, "cli:endl"):
pass
else:
ctx.Utils.indent(ctx, xml_menu, indent_count + 1).put("return false;").endl()
# End the block
if ctx.xml.is_node_with_attr(xml_node, "cli:tag", "@id"):
ctx.Utils.indent(ctx, xml_menu, indent_count + 0).put("} while(true);").endl()
else:
ctx.Utils.indent(ctx, xml_menu, indent_count + 0).put("}").endl()
# End label
ctx.Utils.indent(ctx, xml_menu, indent_count + 0).put("%s: ;" % ctx.Utils.node2endlbl(ctx, xml_node)).endl()
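    # Illustrative note (not part of the generator): for a plain keyword or param node, the
    # routine above emits C++ of roughly this shape (labels come from node2toplbl/node2endlbl):
    #
    #   <top_label>: ;
    #   if (cli_Elements == *<node_var>) {
    #       if (! cli_Elements.StepIt()) return false;
    #       ...sub-element blocks...
    #       return false;
    #   }
    #   <end_label>: ;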
@staticmethod
def walk(ctx, xml_menu, xml_node, indent_count, xml_tag=None):
""" Second part of recursive execution code generation routine.
Propagate over sub-nodes.
@param ctx (Cli2Cpp) Execution context.
@param xml_menu (XML node) cli:cli or cli:menu node.
@param xml_node (XML node) Current node which execution code is generated for.
@param xml_tag (XML node) cli:tag[@id] reference being processed: only set jumps.
@param indent_count (int) Number of indentation from the menu offset. """
if ctx.xml.is_node(xml_node, "cli:endl"):
ctx.Utils.indent(ctx, xml_menu, indent_count + 0).put("return true;").endl()
elif ctx.xml.is_node_with_attr(xml_node, "cli:tag", "@ref"):
# Tag reference
_tag_name = ctx.xml.attr_value(xml_node, "@ref")
_xml_target = ctx.cache.node(xml_node).cli_Cli2xxx_tag_id
# Now check the target
if _xml_target is None:
ctx.Utils.abort(ctx, xml_node, "unknown tag reference '%s'" % _tag_name)
elif _xml_target == ctx.xml.parent_node(xml_node):
ctx.Utils.abort(ctx, xml_node, "tag reference '%s' directly located in the tag" % _tag_name)
else:
# Make the references
if xml_tag is not None:
ctx.Menu.Execute.walk(ctx, xml_menu, _xml_target, indent_count, xml_tag)
else:
ctx.Menu.Execute.walk(ctx, xml_menu, _xml_target, indent_count, _xml_target)
else:
for _xml_sub_node in ctx.xml.children(xml_node):
if ( # pylint: disable=bad-continuation
ctx.xml.is_node(_xml_sub_node, "cli:keyword") or ctx.xml.is_node(_xml_sub_node, "cli:param")
or ctx.xml.is_node(_xml_sub_node, "cli:tag") or ctx.xml.is_node(_xml_sub_node, "cli:endl")
):
if xml_tag is None:
ctx.Menu.Execute.execute_node(ctx, xml_menu, _xml_sub_node, indent_count)
else:
if ctx.xml.is_node_with_attr(_xml_sub_node, "cli:tag", "@ref"):
ctx.Menu.Execute.walk(ctx, xml_menu, _xml_sub_node, indent_count, xml_tag)
elif ctx.xml.is_node_with_attr_value(_xml_sub_node, "cli:tag", "@hollow", "yes"):
# Do nothing
pass
elif ctx.xml.is_node_with_attr(_xml_sub_node, "cli:tag", "@id"):
ctx.Menu.Execute.walk(ctx, xml_menu, _xml_sub_node, indent_count, xml_tag)
else:
ctx.Utils.indent(ctx, xml_menu, indent_count + 0).put("if (cli_Elements == *%s) " % ctx.Utils.node2var(ctx, _xml_sub_node))
ctx.out.put("goto %s;" % ctx.Utils.node2toplbl(ctx, xml_tag)).endl()
# Handlers
class Handlers(object):
""" Menu.Handlers code generation routines. """
def __init__(self, ):
""" Constructor. """
# Static class, nothing to be done
pass
@staticmethod
def execute(ctx, xml_menu):
""" Executes the Menu.Execute code generation routines.
@param ctx (Cli2Cpp) Execution context.
@param xml_menu (XML node) cli:cli or cli:menu node. """
ctx.Menu.Handlers.error_handler(ctx, xml_menu) # cli only
ctx.Menu.Handlers.exit_handler(ctx, xml_menu)
ctx.Menu.Handlers.prompt_handler(ctx, xml_menu)
@staticmethod
def error_handler(ctx, xml_menu):
""" Error handler code generation.
@param ctx (Cli2Cpp) Execution context.
@param xml_menu (XML node) cli:cli or cli:menu node. """
# Error handler
if ctx.xml.is_node(xml_menu, "cli:cli"):
ctx.Utils.indent(ctx, xml_menu, 1).put("public: virtual const bool OnError(") # pylint: disable=bad-whitespace
ctx.out .put( "const cli::ResourceString& location, ") # pylint: disable=bad-whitespace
ctx.out .put( "const cli::ResourceString& message") # pylint: disable=bad-whitespace
ctx.out .put(") const {").endl() # pylint: disable=bad-whitespace
_xml_extra_sources = ctx.xml.xpath_set(xml_menu, "cli:handler[@name='error']/cli:cpp")
if len(_xml_extra_sources) > 0:
ctx.out.put(ctx.args.user_indent()).endl()
for _xml_extra_source in _xml_extra_sources:
ctx.Utils.indent(ctx, xml_menu, 2, with_user_indent=True)
ctx.Utils.source_node(ctx, _xml_extra_source)
ctx.out.endl()
ctx.out.put(ctx.args.user_indent()).endl()
ctx.Utils.indent(ctx, xml_menu, 2).put("return Cli::OnError(location, message);").endl()
ctx.Utils.indent(ctx, xml_menu, 1).put("}").endl()
ctx.out.endl()
@staticmethod
def exit_handler(ctx, xml_menu):
""" Exit handler code generation.
@param ctx (Cli2Cpp) Execution context.
@param xml_menu (XML node) cli:cli or cli:menu node. """
ctx.Utils.indent(ctx, xml_menu, 1).put("public: virtual void OnExit(void) const {").endl()
_xml_extra_sources = ctx.xml.xpath_set(xml_menu, "cli:handler[@name='exit']/cli:cpp")
if len(_xml_extra_sources) > 0:
ctx.out.put(ctx.args.user_indent()).endl()
for _xml_extra_source in _xml_extra_sources:
ctx.Utils.indent(ctx, xml_menu, 2, with_user_indent=True)
ctx.Utils.source_node(ctx, _xml_extra_source)
ctx.out.endl()
ctx.out.put(ctx.args.user_indent()).endl()
ctx.Utils.indent(ctx, xml_menu, 1).put("}").endl()
ctx.out.endl()
@staticmethod
def prompt_handler(ctx, xml_menu):
""" Prompt handler code generation.
@param ctx (Cli2Cpp) Execution context.
@param xml_menu (XML node) cli:cli or cli:menu node. """
ctx.Utils.indent(ctx, xml_menu, 1).put("public: virtual const cli::tk::String OnPrompt(void) const {").endl()
_xml_extra_sources = ctx.xml.xpath_set(xml_menu, "cli:handler[@name='prompt']/cli:cpp")
if len(_xml_extra_sources) > 0:
ctx.out.put(ctx.args.user_indent()).endl()
for _xml_extra_source in _xml_extra_sources:
ctx.Utils.indent(ctx, xml_menu, 2, with_user_indent=True)
ctx.Utils.source_node(ctx, _xml_extra_source)
ctx.out.endl()
ctx.out.put(ctx.args.user_indent()).endl()
ctx.Utils.indent(ctx, xml_menu, 2).put("return Menu::OnPrompt();").endl()
ctx.Utils.indent(ctx, xml_menu, 1).put("}").endl()
ctx.out.endl()
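    # Illustrative note (not part of the generator): with a 'prompt' handler defined, the
    # routine above emits C++ of roughly this shape (user cli:cpp sources replace the ellipsis):
    #
    #   public: virtual const cli::tk::String OnPrompt(void) const {
    #       ...
    #       return Menu::OnPrompt();
    #   }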
class Utils(clicommon.CtxUtils):
""" Utils routines. """
def __init__(self):
""" Constructor. """
# Static class, nothing to be done
clicommon.CtxUtils.__init__(self)
@staticmethod
def extra_source(ctx, xml_node, option):
""" Extra source generation.
@param ctx (Cli2Cpp) Execution context.
@param xml_node (XML node) Focus node to generate extra source section for. May be None.
@param option (str) Extra source @option attribute value.
@return True for success, False otherwise. """
# Determine indentation count
_indent_count = 0
if option == "constructor":
_indent_count = 2
elif option == "members":
_indent_count = 1
| |
<reponame>pabs3/oci-python-sdk<filename>src/oci/identity/models/domain.py
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class Domain(object):
"""
(For tenancies that support identity domains) Properties for an identity domain. An identity domain is used to manage users and groups, integration standards, external identities, and secure application integration through Oracle Single Sign-on (SSO) configuration.
"""
#: A constant which can be used with the type property of a Domain.
#: This constant has a value of "DEFAULT"
TYPE_DEFAULT = "DEFAULT"
#: A constant which can be used with the type property of a Domain.
#: This constant has a value of "SECONDARY"
TYPE_SECONDARY = "SECONDARY"
#: A constant which can be used with the lifecycle_state property of a Domain.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a Domain.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a Domain.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a Domain.
#: This constant has a value of "INACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
#: A constant which can be used with the lifecycle_details property of a Domain.
#: This constant has a value of "DEACTIVATING"
LIFECYCLE_DETAILS_DEACTIVATING = "DEACTIVATING"
#: A constant which can be used with the lifecycle_details property of a Domain.
#: This constant has a value of "ACTIVATING"
LIFECYCLE_DETAILS_ACTIVATING = "ACTIVATING"
#: A constant which can be used with the lifecycle_details property of a Domain.
#: This constant has a value of "UPDATING"
LIFECYCLE_DETAILS_UPDATING = "UPDATING"
def __init__(self, **kwargs):
"""
Initializes a new Domain object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this Domain.
:type id: str
:param compartment_id:
The value to assign to the compartment_id property of this Domain.
:type compartment_id: str
:param display_name:
The value to assign to the display_name property of this Domain.
:type display_name: str
:param description:
The value to assign to the description property of this Domain.
:type description: str
:param url:
The value to assign to the url property of this Domain.
:type url: str
:param home_region_url:
The value to assign to the home_region_url property of this Domain.
:type home_region_url: str
:param home_region:
The value to assign to the home_region property of this Domain.
:type home_region: str
:param replica_regions:
The value to assign to the replica_regions property of this Domain.
:type replica_regions: list[oci.identity.models.ReplicatedRegionDetails]
:param type:
The value to assign to the type property of this Domain.
Allowed values for this property are: "DEFAULT", "SECONDARY", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type type: str
:param license_type:
The value to assign to the license_type property of this Domain.
:type license_type: str
:param is_hidden_on_login:
The value to assign to the is_hidden_on_login property of this Domain.
:type is_hidden_on_login: bool
:param time_created:
The value to assign to the time_created property of this Domain.
:type time_created: datetime
:param lifecycle_state:
The value to assign to the lifecycle_state property of this Domain.
Allowed values for this property are: "CREATING", "ACTIVE", "DELETING", "INACTIVE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param lifecycle_details:
The value to assign to the lifecycle_details property of this Domain.
Allowed values for this property are: "DEACTIVATING", "ACTIVATING", "UPDATING", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_details: str
:param freeform_tags:
The value to assign to the freeform_tags property of this Domain.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this Domain.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'id': 'str',
'compartment_id': 'str',
'display_name': 'str',
'description': 'str',
'url': 'str',
'home_region_url': 'str',
'home_region': 'str',
'replica_regions': 'list[ReplicatedRegionDetails]',
'type': 'str',
'license_type': 'str',
'is_hidden_on_login': 'bool',
'time_created': 'datetime',
'lifecycle_state': 'str',
'lifecycle_details': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'id': 'id',
'compartment_id': 'compartmentId',
'display_name': 'displayName',
'description': 'description',
'url': 'url',
'home_region_url': 'homeRegionUrl',
'home_region': 'homeRegion',
'replica_regions': 'replicaRegions',
'type': 'type',
'license_type': 'licenseType',
'is_hidden_on_login': 'isHiddenOnLogin',
'time_created': 'timeCreated',
'lifecycle_state': 'lifecycleState',
'lifecycle_details': 'lifecycleDetails',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._id = None
self._compartment_id = None
self._display_name = None
self._description = None
self._url = None
self._home_region_url = None
self._home_region = None
self._replica_regions = None
self._type = None
self._license_type = None
self._is_hidden_on_login = None
self._time_created = None
self._lifecycle_state = None
self._lifecycle_details = None
self._freeform_tags = None
self._defined_tags = None
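    # Illustrative sketch (not part of the generated SDK module): constructing a Domain from
    # keyword arguments; every value below is a hypothetical placeholder.
    #
    #   domain = Domain(
    #       id="ocid1.domain.oc1..exampleuniqueID",
    #       compartment_id="ocid1.compartment.oc1..exampleuniqueID",
    #       display_name="ExampleDomain",
    #       description="Example identity domain",
    #       type="DEFAULT",
    #       lifecycle_state="ACTIVE")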
@property
def id(self):
"""
**[Required]** Gets the id of this Domain.
The OCID of the identity domain.
:return: The id of this Domain.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Domain.
The OCID of the identity domain.
:param id: The id of this Domain.
:type: str
"""
self._id = id
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this Domain.
The OCID of the compartment containing the identity domain.
:return: The compartment_id of this Domain.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this Domain.
The OCID of the compartment containing the identity domain.
:param compartment_id: The compartment_id of this Domain.
:type: str
"""
self._compartment_id = compartment_id
@property
def display_name(self):
"""
**[Required]** Gets the display_name of this Domain.
The mutable display name of the identity domain.
:return: The display_name of this Domain.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this Domain.
The mutable display name of the identity domain.
:param display_name: The display_name of this Domain.
:type: str
"""
self._display_name = display_name
@property
def description(self):
"""
**[Required]** Gets the description of this Domain.
The identity domain description. You can have an empty description.
:return: The description of this Domain.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this Domain.
The identity domain description. You can have an empty description.
:param description: The description of this Domain.
:type: str
"""
self._description = description
@property
def url(self):
"""
**[Required]** Gets the url of this Domain.
Region-agnostic identity domain URL.
:return: The url of this Domain.
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""
Sets the url of this Domain.
Region-agnostic identity domain URL.
:param url: The url of this Domain.
:type: str
"""
self._url = url
@property
def home_region_url(self):
"""
**[Required]** Gets the home_region_url of this Domain.
Region-specific identity domain URL.
:return: The home_region_url of this Domain.
:rtype: str
"""
return self._home_region_url
@home_region_url.setter
def home_region_url(self, home_region_url):
"""
Sets the home_region_url of this Domain.
Region-specific identity domain URL.
:param home_region_url: The home_region_url of this Domain.
:type: str
"""
self._home_region_url = home_region_url
@property
def home_region(self):
"""
**[Required]** Gets the home_region of this Domain.
The home region for the identity domain.
See `Regions and Availability Domains`__
for the full list of supported region names.
Example: `us-phoenix-1`
__ https://docs.cloud.oracle.com/Content/General/Concepts/regions.htm
:return: The home_region of this Domain.
:rtype: str
"""
return self._home_region
@home_region.setter
def home_region(self, home_region):
"""
Sets the home_region of this Domain.
The home region for the identity domain.
See `Regions and Availability Domains`__
for the full list of supported region names.
Example: `us-phoenix-1`
__ https://docs.cloud.oracle.com/Content/General/Concepts/regions.htm
:param home_region: The home_region of this Domain.
:type: str
"""
self._home_region = home_region
@property
def replica_regions(self):
"""
**[Required]** Gets the replica_regions of this Domain.
The regions where replicas of the identity domain exist.
:return: The replica_regions of this Domain.
:rtype: list[oci.identity.models.ReplicatedRegionDetails]
"""
return self._replica_regions
@replica_regions.setter
def replica_regions(self, replica_regions):
    """
    Sets the replica_regions of this Domain.
    The regions where replicas of the identity domain exist.
    :param replica_regions: The replica_regions of this Domain.
    :type: list[oci.identity.models.ReplicatedRegionDetails]
    """
    self._replica_regions = replica_regions
@0x24> pop eax; ret
0x0020: 0x3 arg0
0x0024: 0x10000007 pop eax; ret
0x0028: 0x77 [arg0] eax = SYS_sigreturn
0x002c: 0x10000000 int 0x80
0x0030: 0x0 gs
0x0034: 0x0 fs
0x0038: 0x0 es
0x003c: 0x0 ds
0x0040: 0x0 edi
0x0044: 0x0 esi
0x0048: 0x0 ebp
0x004c: 0x0 esp
0x0050: 0x4 ebx
0x0054: 0x6 edx
0x0058: 0x5 ecx
0x005c: 0xb eax = SYS_execve
0x0060: 0x0 trapno
0x0064: 0x0 err
0x0068: 0x10000000 int 0x80
0x006c: 0x23 cs
0x0070: 0x0 eflags
0x0074: 0x0 esp_at_signal
0x0078: 0x2b ss
0x007c: 0x0 fpstate
>>> r = ROP(e, 0x8048000)
>>> r.funcname(1, 2)
>>> r.funcname(3)
>>> r.execve(4, 5, 6)
>>> print(r.dump())
0x8048000: 0x10001234 funcname(1, 2)
0x8048004: 0x10000003 <adjust @0x8048018> add esp, 0x10; ret
0x8048008: 0x1 arg0
0x804800c: 0x2 arg1
0x8048010: b'eaaa' <pad>
0x8048014: b'faaa' <pad>
0x8048018: 0x10001234 funcname(3)
0x804801c: 0x10000007 <adjust @0x8048024> pop eax; ret
0x8048020: 0x3 arg0
0x8048024: 0x10000007 pop eax; ret
0x8048028: 0x77 [arg0] eax = SYS_sigreturn
0x804802c: 0x10000000 int 0x80
0x8048030: 0x0 gs
0x8048034: 0x0 fs
0x8048038: 0x0 es
0x804803c: 0x0 ds
0x8048040: 0x0 edi
0x8048044: 0x0 esi
0x8048048: 0x0 ebp
0x804804c: 0x8048080 esp
0x8048050: 0x4 ebx
0x8048054: 0x6 edx
0x8048058: 0x5 ecx
0x804805c: 0xb eax = SYS_execve
0x8048060: 0x0 trapno
0x8048064: 0x0 err
0x8048068: 0x10000000 int 0x80
0x804806c: 0x23 cs
0x8048070: 0x0 eflags
0x8048074: 0x0 esp_at_signal
0x8048078: 0x2b ss
0x804807c: 0x0 fpstate
>>> elf = ELF.from_assembly('ret')
>>> r = ROP(elf)
>>> r.ret.address == 0x10000000
True
>>> r = ROP(elf, badchars=b'\x00')
>>> r.gadgets == {}
True
>>> r.ret is None
True
"""
def __init__(self, elfs, base = None, badchars = b'', **kwargs):
"""
Arguments:
elfs(list): List of :class:`.ELF` objects for mining
base(int): Stack address where the first byte of the ROP chain lies, if known.
badchars(str): Characters which should not appear in ROP gadget addresses.
"""
import ropgadget
# Permit singular ROP(elf) vs ROP([elf])
if isinstance(elfs, ELF):
elfs = [elfs]
elif isinstance(elfs, (bytes, six.text_type)):
elfs = [ELF(elfs)]
#: List of individual ROP gadgets, ROP calls, SROP frames, etc.
#: This is intended to be the highest-level abstraction that we can muster.
self._chain = []
#: List of ELF files which are available for mining gadgets
self.elfs = elfs
#: Stack address where the first byte of the ROP chain lies, if known.
self.base = base
#: Whether or not the ROP chain directly sets the stack pointer to a value
#: which is not contiguous
self.migrated = False
#: Characters which should not appear in ROP gadget addresses.
self._badchars = set(badchars)
self.__load()
@staticmethod
@LocalContext
def from_blob(blob, *a, **kw):
return ROP(ELF.from_bytes(blob, *a, **kw))
def setRegisters(self, registers):
"""
Returns a list of addresses/values which will set the specified register context.
Arguments:
registers(dict): Dictionary of ``{register name: value}``
Returns:
A list of tuples, ordering the stack.
Each tuple is in the form of ``(value, name)`` where ``value`` is either a
gadget address or literal value to go on the stack, and ``name`` is either
a string name or other item which can be "unresolved".
Note:
This is basically an implementation of the Set Cover Problem, which is
NP-hard. This means that we will take polynomial time N**2, where N is
the number of gadgets. We can reduce runtime by discarding useless and
inferior gadgets ahead of time.
"""
if not registers:
return []
regset = set(registers)
bad_instructions = set(('syscall', 'sysenter', 'int 0x80'))
# Collect all gadgets which use these registers
# Also collect the "best" gadget for each combination of registers
gadgets = []
best_gadgets = {}
for gadget in self.gadgets.values():
# Do not use gadgets which don't end with 'ret'
if gadget.insns[-1] != 'ret':
continue
# Do not use gadgets which contain 'syscall' or 'int'
if set(gadget.insns) & bad_instructions:
continue
touched = tuple(regset & set(gadget.regs))
if not touched:
continue
old = best_gadgets.get(touched, gadget)
# if we have a new gadget for the touched registers, choose it
# if the new gadget requires less stack space, choose it
# if both gadgets require the same stack space, choose the one with fewer instructions
if (old is gadget) \
or (old.move > gadget.move) \
or (old.move == gadget.move and len(old.insns) > len(gadget.insns)):
best_gadgets[touched] = gadget
winner = None
budget = 999999999
for num_gadgets in range(len(registers)):
for combo in itertools.combinations(sorted(best_gadgets.values(), key=repr, reverse=True), 1+num_gadgets):
# Is this better than what we can already do?
cost = sum((g.move for g in combo))
if cost > budget:
continue
# Does it hit all of the registers we want?
coverage = set(sum((g.regs for g in combo), [])) & regset
if coverage != regset:
continue
# It is better than what we had, and hits all of the registers.
winner = combo
budget = cost
if not winner:
log.error("Could not satisfy setRegisters(%r)", registers)
# We have our set of "winner" gadgets, let's build a stack!
stack = []
for gadget in winner:
moved = 8 # Account for the gadget itself
goodregs = set(gadget.regs) & regset
name = ",".join(goodregs)
stack.append((gadget.address, gadget))
for r in gadget.regs:
moved += 8
if r in registers:
stack.append((registers[r], r))
else:
stack.append((Padding('<pad %s>' % r), r))
for slot in range(moved, gadget.move, context.bytes):
left = gadget.move - slot
stack.append((Padding('<pad %#x>' % left), 'stack padding'))
return stack
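    # Illustrative sketch (not part of the original source): a hypothetical call
    # such as rop.setRegisters({'eax': 0xb, 'ebx': 0}) returns gadget addresses
    # interleaved with the literal values popped into each register, e.g.
    # [(gadget.address, gadget), (0xb, 'eax'), (0, 'ebx'), ...]; the exact
    # gadgets chosen depend on what was mined from the loaded ELFs.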
def resolve(self, resolvable):
"""Resolves a symbol to an address
Arguments:
resolvable(str,int): Thing to convert into an address
Returns:
int containing address of 'resolvable', or None
"""
if isinstance(resolvable, str):
for elf in self.elfs:
if resolvable in elf.symbols:
return elf.symbols[resolvable]
if isinstance(resolvable, six.integer_types):
return resolvable
def unresolve(self, value):
"""Inverts 'resolve'. Given an address, it attempts to find a symbol
for it in the loaded ELF files. If none is found, it searches all
known gadgets, and returns the disassembly
Arguments:
value(int): Address to look up
Returns:
String containing the symbol name for the address, disassembly for a gadget
(if there's one at that address), or an empty string.
"""
for elf in self.elfs:
for name, addr in elf.symbols.items():
if addr == value:
return name
if value in self.gadgets:
return '; '.join(self.gadgets[value].insns)
return ''
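    # Illustrative sketch based on the doctest above: resolve() and unresolve()
    # are inverses over known symbols and gadgets, e.g.
    #   rop.resolve('funcname')    # -> 0x10001234
    #   rop.unresolve(0x10001234)  # -> 'funcname'
    #   rop.unresolve(0x10000007)  # -> 'pop eax; ret'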
def generatePadding(self, offset, count):
"""
Generates padding to be inserted into the ROP stack.
>>> rop = ROP([])
>>> val = rop.generatePadding(5,15)
>>> cyclic_find(val[:4])
5
>>> len(val)
15
>>> rop.generatePadding(0,0)
b''
"""
# Ensure we don't generate a cyclic pattern which contains badchars
alphabet = b''.join(packing.p8(c) for c in bytearray(string.ascii_lowercase.encode()) if c not in self._badchars)
if count:
return cyclic(offset + count, alphabet=alphabet)[-count:]
return b''
def describe(self, object):
"""
Return a description for an object in the ROP stack
"""
if isinstance(object, (Call, constants.Constant)):
return str(object)
if isinstance(object, six.integer_types):
return self.unresolve(object)
if isinstance(object, (bytes, six.text_type)):
return repr(object)
if isinstance(object, Gadget):
return '; '.join(object.insns)
def build(self, base = None, description = None):
"""
Construct the ROP chain into a list of elements which can be passed
to :func:`.flat`.
Arguments:
base(int):
The base address to build the rop-chain from. Defaults to
:attr:`base`.
description(dict):
Optional output argument, which will gets a mapping of
``address: description`` for each address on the stack,
starting at ``base``.
"""
if base is None:
base = self.base or 0
stack = DescriptiveStack(base)
chain = self._chain
#
# First pass
#
# Get everything onto the stack and save as much descriptive information
# as possible.
#
# The only replacements performed are to add stack adjustment gadgets
# (to move SP to the next gadget after a Call) and NextGadgetAddress,
# which can only be calculated in this pass.
#
iterable = enumerate(chain)
for idx, slot in iterable:
remaining = len(chain) - 1 - idx
address = stack.next
# Integers can just be added.
# Do our best to find out what the address is.
if isinstance(slot, six.integer_types):
stack.describe(self.describe(slot))
stack.append(slot)
# Byte blobs can also be added, however they must be
# broken down into pointer-width blobs.
elif isinstance(slot, (bytes, six.text_type)):
stack.describe(self.describe(slot))
if not isinstance(slot, bytes):
slot = slot.encode()
slot += self.generatePadding(stack.next, len(slot) % context.bytes)
<filename>network.py
import os
import numpy as np
import tensorflow as tf
from utils.data_reader import H5DataLoader
from utils.img_utils import imsave
from utils import ops_param as ops
import scipy
import time
import pickle
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import math_ops
class Unet(object):
def __init__(self, sess, conf):
self.sess = sess
self.conf = conf
self.def_params()
if not os.path.exists(conf.modeldir):
os.makedirs(conf.modeldir)
if not os.path.exists(conf.logdir):
os.makedirs(conf.logdir)
if not os.path.exists(conf.sampledir):
os.makedirs(conf.sampledir)
self.configure_networks()
self.train_summary = self.config_summary('train')
self.valid_summary = self.config_summary('valid')
def def_params(self):
self.data_format = 'NHWC'
self.global_step = None
self.conv_size = (3, 3)
self.pool_size = (2, 2)
self.axis, self.channel_axis = (1, 2), 3
self.input_shape = [self.conf.batch, self.conf.height, self.conf.width, self.conf.channel]
self.output_shape = [self.conf.batch, self.conf.height, self.conf.width]
def configure_networks(self):
self.build_network()
optimizer = tf.train.AdamOptimizer(self.conf.learning_rate)
self.train_op = optimizer.minimize(self.loss_op, name='train_op')
tf.set_random_seed(self.conf.random_seed)
self.sess.run(tf.global_variables_initializer())
trainable_vars = tf.trainable_variables()
self.saver = tf.train.Saver(var_list=trainable_vars, max_to_keep=500)
self.writer = tf.summary.FileWriter(self.conf.logdir)
print("Lets start")
def build_network(self):
self.inputs = tf.placeholder(
tf.float32, self.input_shape, name='inputs')
self.annotations = tf.placeholder(
tf.int64, self.output_shape, name='annotations')
self.predictions = self.inference(self.inputs)
self.cal_loss()
def cal_loss(self):
one_hot_annotations = tf.one_hot(
self.annotations, depth=self.conf.class_num,
axis=self.channel_axis, name='annotations/one_hot')
losses = tf.losses.softmax_cross_entropy(
one_hot_annotations, self.predictions, scope='loss/losses')
self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
self.decoded_predictions = tf.argmax(
self.predictions, self.channel_axis, name='accuracy/decode_pred')
correct_prediction = tf.equal(
self.annotations, self.decoded_predictions,
name='accuracy/correct_pred')
self.accuracy_op = tf.reduce_mean(
tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
name='accuracy/accuracy_op')
weights = tf.cast(
tf.greater(self.decoded_predictions, 0, name='m_iou/greater'),
tf.int32, name='m_iou/weights')
self.m_iou, self.miou_op = tf.metrics.mean_iou(
self.annotations, self.decoded_predictions, self.conf.class_num, weights, name='m_iou/m_ious')
# Flatten the input if its rank > 1.
predictions = self.decoded_predictions
if predictions.get_shape().ndims > 1:
predictions = array_ops.reshape(predictions, [-1])
labels = self.annotations
if labels.get_shape().ndims > 1:
labels = array_ops.reshape(labels, [-1])
weights_conf = weights
if (weights_conf is not None) and (weights_conf.get_shape().ndims > 1):
weights_conf = array_ops.reshape(weights_conf, [-1])
# Cast the type to int64 required by confusion_matrix_ops.
predictions = math_ops.to_int64(predictions)
labels = math_ops.to_int64(labels)
self.confusion_matrix = confusion_matrix.confusion_matrix(
labels, predictions, self.conf.class_num, weights=weights_conf, dtype=tf.int32, name='confu_matrix/confu_matrix_op')
def config_summary(self, name):
summarys = []
summarys.append(tf.summary.scalar(name+'/loss', self.loss_op))
summarys.append(tf.summary.scalar(name+'/accuracy', self.accuracy_op))
summarys.append(tf.summary.scalar(name+'/mIoU', self.m_iou))
if name == 'valid':
summarys.append(tf.summary.image(
name+'/input', self.inputs, max_outputs=100))
summarys.append(tf.summary.image(
name +
'/annotation', tf.cast(tf.expand_dims(
self.annotations, -1), tf.float32),
max_outputs=100))
summarys.append(tf.summary.image(
name +
'/prediction', tf.cast(tf.expand_dims(
self.decoded_predictions, -1), tf.float32),
max_outputs=100))
summary = tf.summary.merge(summarys)
return summary
def inference(self, inputs):
outputs = inputs
down_outputs = []
for layer_index in range(self.conf.network_depth-1):
is_first = True if not layer_index else False
name = 'down%s' % layer_index
outputs = self.construct_down_block(
outputs, name, down_outputs, first=is_first)
outputs = self.construct_bottom_block(outputs, 'bottom')
for layer_index in range(self.conf.network_depth-2, -1, -1):
is_first = True if layer_index==self.conf.network_depth-2 else False
is_final = True if layer_index == 0 else False
name = 'up%s' % layer_index
down_inputs = down_outputs[layer_index]
if self.conf.deconv_name == 'sub_pixel_conv':
if self.conf.up_architecture == 1:
outputs = self.construct_up_block_1_sub_pixel(
outputs, down_inputs, name, final=is_final)
elif self.conf.up_architecture == 2:
outputs = self.construct_up_block_2_sub_pixel(
outputs, down_inputs, name, first=is_first, final=is_final)
elif self.conf.up_architecture == 3:
outputs = self.construct_up_block_3_sub_pixel(
outputs, down_inputs, name, first=is_first, final=is_final)
elif self.conf.up_architecture == 4:
outputs = self.construct_up_block_4_sub_pixel(
outputs, down_inputs, name, final=is_final)
elif self.conf.up_architecture == 5:
outputs = self.construct_up_block_5_sub_pixel(
outputs, down_inputs, name, final=is_final)
else:
outputs = self.construct_up_block(
outputs, down_inputs, name, final=is_final)
return outputs
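    # Note: with network_depth = N the encoder builds N-1 down blocks plus one
    # bottom block, and the decoder mirrors them with N-1 up blocks. When
    # conf.deconv_name == 'sub_pixel_conv', conf.up_architecture (1-5) selects
    # which of the sub-pixel decoder variants defined below is used.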
def construct_down_block(self, inputs, name, down_outputs, first=False):
num_outputs = self.conf.start_channel_num if first else 2 * inputs.shape[self.channel_axis].value
conv1 = ops.conv2d(
inputs, num_outputs, self.conv_size, name+'/conv1', activation=self.conf.activation_function)
conv2 = ops.conv2d(
conv1, num_outputs, self.conv_size, name+'/conv2', activation=self.conf.activation_function)
down_outputs.append(conv2)
pool = ops.pool2d(
conv2, self.pool_size, name+'/pool')
return pool
def construct_bottom_block(self, inputs, name):
print("--------")
print("Bottom layer:")
print("Inputs:")
print(inputs.shape)
num_outputs = inputs.shape[self.channel_axis].value
conv1 = ops.conv2d(
inputs, 2*num_outputs, self.conv_size, name+'/conv1', activation=self.conf.activation_function)
print("Conv1:")
print(conv1.shape)
if self.conf.up_architecture == 1 or self.conf.up_architecture == 2:
conv2 = ops.conv2d(
conv1, num_outputs, self.conv_size, name+'/conv2', activation=self.conf.activation_function)
print("Conv2:")
print(conv2.shape)
return conv2
else:
return conv1
def construct_up_block_1_sub_pixel(self, inputs, down_inputs, name, final=False):
print("--------")
print("Inputs:")
print(inputs.shape)
num_outputs = inputs.shape[self.channel_axis].value
# producing r^2 times more feature maps
conv1 = tf.contrib.layers.conv2d(
inputs, num_outputs * self.conf.ratio ** 2, self.conv_size, scope=name + '/conv_sub_pixel',
data_format='NHWC', activation_fn=None, biases_initializer=None)
print("Conv before subpixel:")
print(conv1.shape)
sub_pixel_conv = self.deconv_func()(
inputs=conv1, scope=name + '/subpixel', r=self.conf.ratio, debug=self.conf.debug, activation=self.conf.activation_function)
print("Sub Pixel:")
print(sub_pixel_conv.shape)
concat = tf.concat(
[sub_pixel_conv, down_inputs], self.channel_axis, name=name + '/concat')
print("Sub Pixel + down inputs:")
print(concat.shape)
conv2 = ops.conv2d(
concat, num_outputs, self.conv_size, name + '/conv2', activation=self.conf.activation_function)
print("Conv after concat:")
print(conv2.shape)
num_outputs = self.conf.class_num if final else num_outputs / 2
conv3 = ops.conv2d(
conv2, num_outputs, self.conv_size, name + '/conv3', activation=self.conf.activation_function)
print("Conv:")
print(conv3.shape)
return conv3
def construct_up_block_2_sub_pixel(self, inputs, down_inputs, name, first=False, final=False):
print("--------")
print("Inputs:")
print(inputs.shape)
if first:
# producing r^2 times more feature maps
num_outputs = inputs.shape[self.channel_axis].value
inputs = tf.contrib.layers.conv2d(
inputs, num_outputs * self.conf.ratio ** 2, self.conv_size, scope=name + '/conv_sub_pixel',
data_format='NHWC', activation_fn=None, biases_initializer=None)
print("Conv before subpixel:")
print(inputs.shape)
sub_pixel_conv = self.deconv_func()(
inputs=inputs, scope=name + '/subpixel', r=self.conf.ratio, debug=self.conf.debug, activation=self.conf.activation_function)
print("Sub Pixel:")
print(sub_pixel_conv.shape)
concat = tf.concat(
[sub_pixel_conv, down_inputs], self.channel_axis, name=name + '/concat')
print("Sub Pixel + down inputs:")
print(concat.shape)
num_outputs = concat.shape[self.channel_axis].value
num_outputs = num_outputs/2 if final else num_outputs
conv2 = ops.conv2d(
concat, num_outputs, self.conv_size, name + '/conv2', activation=self.conf.activation_function)
print("Conv after concat:")
print(conv2.shape)
num_outputs = self.conf.class_num if final else num_outputs
conv3 = ops.conv2d(
conv2, num_outputs, self.conv_size, name + '/conv3', activation=self.conf.activation_function)
print("Conv:")
print(conv3.shape)
return conv3
def construct_up_block_3_sub_pixel(self, inputs, down_inputs, name, first=False, final=False):
print("--------")
print("Inputs:")
print(inputs.shape)
if first:
# producing r^2 times more feature maps (in this architecture the first layer only
# needs to produce 2x more feature maps)
num_outputs = inputs.shape[self.channel_axis].value
inputs = tf.contrib.layers.conv2d(
inputs, num_outputs*2, self.conv_size, scope=name + '/conv_sub_pixel',
data_format='NHWC', activation_fn=None, biases_initializer=None)
print("Conv before subpixel:")
print(inputs.shape)
sub_pixel_conv = self.deconv_func()(
inputs=inputs, scope=name + '/subpixel', r=self.conf.ratio, debug=self.conf.debug, activation=self.conf.activation_function)
print("Sub Pixel:")
print(sub_pixel_conv.shape)
outputs = tf.concat(
[sub_pixel_conv, down_inputs], self.channel_axis, name=name + '/concat')
print("Sub Pixel + down inputs:")
print(outputs.shape)
num_outputs = outputs.shape[self.channel_axis].value
if final:
outputs = ops.conv2d(
outputs, num_outputs, self.conv_size, name + '/unique_conv', activation=self.conf.activation_function)
print("Conv unique:")
print(outputs.shape)
num_outputs = num_outputs/2 if final else num_outputs
outputs = ops.conv2d(
outputs, num_outputs, self.conv_size, name + '/conv2', activation=self.conf.activation_function)
print("Conv after concat:")
print(outputs.shape)
num_outputs = self.conf.class_num if final else num_outputs
outputs = ops.conv2d(
outputs, num_outputs, self.conv_size, name + '/conv3', activation=self.conf.activation_function)
print("Conv:")
print(outputs.shape)
return outputs
def construct_up_block_4_sub_pixel(self, inputs, down_inputs, name, final=False):
print("--------")
print("Inputs:")
print(inputs.shape)
num_outputs = inputs.shape[self.channel_axis].value
# producing r times more feature maps
conv1 = tf.contrib.layers.conv2d(
inputs, num_outputs * self.conf.ratio, self.conv_size, scope=name + '/conv_sub_pixel',
data_format='NHWC', activation_fn=None, biases_initializer=None)
print("Conv before subpixel:")
print(conv1.shape)
sub_pixel_conv = self.deconv_func()(
inputs=conv1, scope=name + '/subpixel', r=self.conf.ratio, debug=self.conf.debug, activation=self.conf.activation_function)
print("Sub Pixel:")
print(sub_pixel_conv.shape)
concat = tf.concat(
[sub_pixel_conv, down_inputs], self.channel_axis, name=name + '/concat')
print("Sub Pixel + down inputs:")
print(concat.shape)
num_outputs = concat.shape[self.channel_axis].value / 2
conv2 = ops.conv2d(
concat, num_outputs, self.conv_size, name + '/conv2', activation=self.conf.activation_function)
print("Conv after concat:")
print(conv2.shape)
num_outputs = self.conf.class_num if final else num_outputs
conv3 = ops.conv2d(
conv2, num_outputs, self.conv_size, name + '/conv3', activation=self.conf.activation_function)
print("Conv:")
print(conv3.shape)
return conv3
def construct_up_block_5_sub_pixel(self, inputs, down_inputs, name, first=False, final=False):
print("--------")
print("Inputs:")
print(inputs.shape)
if first:
# producing r^2 times more feature maps (in this architecture the first layer only
# needs to produce 2x more feature maps)
num_outputs = inputs.shape[self.channel_axis].value
inputs = tf.contrib.layers.conv2d(
inputs, num_outputs*2, self.conv_size, scope=name + '/conv_sub_pixel',
data_format='NHWC', activation_fn=None, biases_initializer=None)
print("Conv before subpixel:")
print(inputs.shape)
sub_pixel_conv = self.deconv_func()(
inputs=inputs, scope=name + '/subpixel', r=self.conf.ratio, debug=self.conf.debug, activation=self.conf.activation_function)
print("Sub Pixel:")
print(sub_pixel_conv.shape)
outputs = tf.concat(
[sub_pixel_conv, down_inputs], self.channel_axis, name=name + '/concat')
print("Sub Pixel + down inputs:")
print(outputs.shape)
num_outputs = outputs.shape[self.channel_axis].value
if final:
outputs = ops.conv2d(
outputs, num_outputs, self.conv_size, name + '/unique_conv', activation=self.conf.activation_function)
print("Conv unique:")
print(outputs.shape)
num_outputs = num_outputs/2 if final else num_outputs
outputs = ops.conv2d(
outputs, num_outputs, self.conv_size, name + '/conv2', activation=self.conf.activation_function)
print("Conv after concat:")
print(outputs.shape)
if final:
print("Its the final one:")
num_outputs = self.conf.class_num
outputs = tf.contrib.layers.conv2d(
outputs, num_outputs, kernel_size=(1, 1), stride=1, scope=name + '/conv_final',
data_format='NHWC', activation_fn=None, biases_initializer=None, padding='VALID')
else:
outputs = ops.conv2d(
outputs, num_outputs, self.conv_size, name + '/conv3', activation=self.conf.activation_function)
print("Conv:")
print(outputs.shape)
return outputs
def construct_up_block(self, inputs, down_inputs, name, final=False):
num_outputs = inputs.shape[self.channel_axis].value
conv1 = self.deconv_func()(
inputs, num_outputs, self.conv_size, name+'/conv1')
print("--------")
print("Inputs:")
print(inputs.shape)
print("Deconv:")
print(conv1.shape)
print("Down inputs:")
print(down_inputs.shape)
conv1 = tf.concat(
[conv1, down_inputs], self.channel_axis, name=name+'/concat')
print("After concat:")
print(conv1.shape)
conv2 = self.conv_func()(
conv1, num_outputs, self.conv_size, name+'/conv2')
num_outputs = self.conf.class_num if final else num_outputs/2
print("Conv2:")
print(conv2.shape)
conv3 = ops.conv2d(
conv2, num_outputs, self.conv_size, name+'/conv3')
print("Conv3:")
print(conv3.shape)
print("--------")
return conv3
def deconv_func(self):
return getattr(ops, self.conf.deconv_name)
def conv_func(self):
return getattr(ops, self.conf.conv_name)
def save_summary(self, summary, step):
print('---->summarizing', step)
self.writer.add_summary(summary, step)
def train(self):
self.restore()
self.sess.run(tf.local_variables_initializer())
train_reader = H5DataLoader(self.conf.data_dir+self.conf.train_data)
valid_reader = H5DataLoader(self.conf.data_dir+self.conf.valid_data)
start_step = 0 if self.global_step is None else self.global_step+1
for epoch_num in range(start_step, self.conf.max_step+1):
print(epoch_num)
if epoch_num % self.conf.test_interval == 0:
inputs, annotations = valid_reader.next_batch(self.conf.batch)
feed_dict = {self.inputs: inputs,
self.annotations: annotations}
loss, summary = self.sess.run(
[self.loss_op, self.valid_summary], feed_dict=feed_dict)
Appendix C (Paul Groves) or Borkowski, K.M.,
# "Accurate Algorithms to Transform Geocentric to Geodetic Coordinates", Bull. Geod. 63, pp.50 - 56, 1989.
k1 = np.sqrt(1 - ecc_o ** 2) * abs(r_eb_e[2, 0])
k2 = (ecc_o ** 2) * R_0
beta = np.sqrt(r_eb_e[0, 0] ** 2 + r_eb_e[1, 0] ** 2)
e_term = (k1 - k2) / beta
f_term = (k1 + k2) / beta
p_term = (4 / 3.0) * (e_term * f_term + 1)
q_term = 2 * (e_term ** 2 - f_term ** 2)
d_term = p_term ** 3 + q_term ** 2
v_term = (np.sqrt(d_term) - q_term) ** (1 / 3.0) - (np.sqrt(d_term) + q_term) ** (1 / 3.0)
g_term = 0.5 * (np.sqrt(e_term ** 2 + v_term) + e_term)
t_term = np.sqrt(g_term ** 2 + (f_term - v_term * g_term) / (2 * g_term - e_term)) - g_term
lat_b = np.sign(r_eb_e[2, 0]) * np.arctan((1 - t_term ** 2) / (2 * t_term * np.sqrt(1 - ecc_o ** 2)))
h_b = (beta - R_0 * t_term) * np.cos(lat_b) + (r_eb_e[2, 0] -
np.sign(r_eb_e[2, 0]) * R_0 * np.sqrt(1 - ecc_o ** 2)) * np.sin(lat_b)
# Calculate ECEF to NED coordinate transformation matrix
c_e_n_matrix = ecef_to_ned_ctm(lat_b, lon_b, trig='no')
# Transform velocity
v_eb_n = c_e_n_matrix * v_eb_e
# Transform attitude
c_b_n_matrix = c_e_n_matrix * c_b_e_matrix
return lat_b, lon_b, h_b, v_eb_n, c_b_n_matrix
# End of Converting Position, Velocity, and CTM from ECEF to NED
'''
---------------------------------
9. Convert Position in LLA to XYZ
---------------------------------
'''
def lla_to_xyz(lat_b, lon_b, h_b):
# Calculate transverse radius of curvature
r_ew = R_0 / np.sqrt(1 - (ecc_o * np.sin(lat_b)) ** 2)
# Calculate ECEF to NED CTM using ECEF_to_NED_CTM()
[c_e_n_matrix, trig] = ecef_to_ned_ctm(lat_b, lon_b, trig='yes')
# Convert position
cos_lat = trig[0]
sin_lat = trig[1]
cos_lon = trig[2]
sin_lon = trig[3]
r_eb_e = np.matrix([[(r_ew + h_b) * cos_lat * cos_lon],
[(r_ew + h_b) * cos_lat * sin_lon],
[((1 - ecc_o ** 2) * r_ew + h_b) * sin_lat]])
return r_eb_e
# End of LLA to XYZ
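# Illustrative usage (hypothetical values): lla_to_xyz(np.deg2rad(45.0),
# np.deg2rad(7.0), 500.0) returns a 3x1 np.matrix with the ECEF X, Y, Z
# components in metres, based on the module-level R_0 and ecc_o constants.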
'''
----------------------------------
10. Convert Position in XYZ to NED
----------------------------------
'''
def xyz_to_ned(r_eb_e, lat_b_ref, lon_b_ref, h_b_ref):
# Convert referenced position in LLA to ECEF
r_eb_e_ref = lla_to_xyz(lat_b_ref, lon_b_ref, h_b_ref)
# Compute the relative position vector in ECEF
delta_r_eb_e = r_eb_e - r_eb_e_ref
# Calculate ECEF to NED CTM using ECEF_to_NED_CTM()
c_e_n_matrix = ecef_to_ned_ctm(lat_b_ref, lon_b_ref, trig='no')
# Convert the relative position vector in ECEF to NED
r_eb_ned = c_e_n_matrix*delta_r_eb_e
return r_eb_ned
# End of XYZ to NED
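def _lla_ned_example(lat, lon, h, lat_ref, lon_ref, h_ref):
    # Illustrative sketch (not part of the original module): express a geodetic
    # position (radians, metres) as a NED offset from a reference position by
    # chaining lla_to_xyz() and xyz_to_ned().
    return xyz_to_ned(lla_to_xyz(lat, lon, h), lat_ref, lon_ref, h_ref)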
'''
----------------------------------
11. Calculate Output Errors in NED
----------------------------------
'''
def cal_err_ned(est_lat_b, est_lon_b, est_alt_b, est_v_eb_n, est_ctm_b_n, true_lat_b, true_lon_b,
true_alt_b, true_v_eb_n, true_ctm_b_n):
# Earth's radii
[r_ns, r_ew] = radii_of_curv(true_lat_b)
# Position error calculation
delta_r_eb_n = np.nan * np.ones((3, 1))
delta_r_eb_n[0, 0] = (est_lat_b - true_lat_b) * (r_ns + true_alt_b)
delta_r_eb_n[1, 0] = (est_lon_b - true_lon_b) * (r_ew + true_alt_b) * np.cos(true_lat_b)
delta_r_eb_n[2, 0] = -(est_alt_b - true_alt_b)
# Velocity error calculation
delta_v_eb_n = est_v_eb_n - true_v_eb_n
# Attitude error calculation
delta_ctm_b_n = est_ctm_b_n * true_ctm_b_n.T
eul_err_nb_n = -ctm_to_euler(delta_ctm_b_n)
return delta_r_eb_n, delta_v_eb_n, eul_err_nb_n
# End of Calculating Errors in NED
'''
-----------------------------------------------------------------------------
12. Convert Position and Velocity Standard Deviations in ECEF to Those in NED
-----------------------------------------------------------------------------
'''
def ekfsd_ecef_to_lla(lat_b_ref, lon_b_ref, alt_b_ref, pva_eb_e):
# Separating the pva vector into:
rsd_eb_e = pva_eb_e[0, 0:3].T
vsd_eb_e = pva_eb_e[0, 3:6].T
euler_sd_eb_e = pva_eb_e[0, 6:9].T
# Earth's radii using (2.105-106)
[r_ns, r_ew] = radii_of_curv(lat_b_ref)
# Jacobian of xyz to lla using (2.119)
t_r_p_matrix = np.matrix([[1.0/(r_ns + alt_b_ref), 0.0, 0.0],
                          [0.0, 1.0/((r_ew + alt_b_ref) * np.cos(lat_b_ref)), 0.0],
                          [0.0, 0.0, -1.0]])
# Calculate ECEF to NED coordinate transformation matrix using (2.150)
[c_e_n_matrix, trig] = ecef_to_ned_ctm(lat_b_ref, lon_b_ref, trig='yes')
# Transform position using (2.118)
rsd_eb_l = t_r_p_matrix * c_e_n_matrix * rsd_eb_e
# The down-component error
term_1 = rsd_eb_e[2, 0] / trig[1]
term_2 = np.sqrt(rsd_eb_e[0, 0] ** 2 + rsd_eb_e[1, 0] ** 2) / trig[0]
rsd_eb_l[2, 0] += term_1 + term_2
# ECEF to NED coordinate transformation matrix
delta_c_e_n_matrix = ecef_to_ned_ctm(rsd_eb_l[0, 0], rsd_eb_l[1, 0], trig='no')
# Transform velocity using (2.152)
vsd_eb_n = delta_c_e_n_matrix * vsd_eb_e
# Transform attitude using (2.152)
euler_sd_eb_n = delta_c_e_n_matrix * euler_sd_eb_e
return rsd_eb_l, vsd_eb_n, euler_sd_eb_n
# End of Converting Position, Velocity, and CTM from ECEF to NED
'''
--------------------------------------------------
13. Convert Position and Velocity from ECEF to NED
--------------------------------------------------
'''
def pv_ecef_to_lla(r_eb_e, v_eb_e):
# Compute the Longitude
lon_b = np.arctan2(r_eb_e[1, 0], r_eb_e[0, 0])
# Convert position using the Borkowski closed-form exact solution rather than iteration, since convergence of an
# iterative loop cannot be guaranteed. Refer to Appendix C (Paul Groves) or Borkowski, K.M.,
# "Accurate Algorithms to Transform Geocentric to Geodetic Coordinates", Bull. Geod. 63, pp. 50-56, 1989.
k1 = np.sqrt(1 - ecc_o ** 2) * abs(r_eb_e[2, 0])
k2 = (ecc_o ** 2) * R_0
beta = np.sqrt(r_eb_e[0, 0] ** 2 + r_eb_e[1, 0] ** 2)
e_term = (k1 - k2) / beta
f_term = (k1 + k2) / beta
p_term = (4 / 3.0) * (e_term * f_term + 1)
q_term = 2 * (e_term ** 2 - f_term ** 2)
d_term = p_term ** 3 + q_term ** 2
v_term = (np.sqrt(d_term) - q_term) ** (1 / 3.0) - (np.sqrt(d_term) + q_term) ** (1 / 3.0)
g_term = 0.5 * (np.sqrt(e_term ** 2 + v_term) + e_term)
t_term = np.sqrt(g_term ** 2 + (f_term - v_term * g_term) / (2 * g_term - e_term)) - g_term
lat_b = np.sign(r_eb_e[2, 0]) * np.arctan((1 - t_term ** 2) / (2 * t_term * np.sqrt(1 - ecc_o ** 2)))
h_b = (beta - R_0 * t_term) * np.cos(lat_b) + (r_eb_e[2, 0] - np.sign(r_eb_e[2, 0]) * R_0 *
np.sqrt(1 - ecc_o ** 2)) * np.sin(lat_b)
# Calculate ECEF to NED coordinate transformation matrix
c_e_n_matrix = ecef_to_ned_ctm(lat_b, lon_b, trig='no')
# Transform velocity
v_eb_n = c_e_n_matrix * v_eb_e
return lat_b, lon_b, h_b, v_eb_n
# End of Converting Position and Velocity from ECEF to NED
'''
------------------------------
14. Initialize Attitude in NED
------------------------------
'''
def init_ned_att(c_b_n_matrix, eul_err_nb_n):
# Attitude initialization
delta_c_b_n_matrix = euler_to_ctm(-eul_err_nb_n)
est_c_b_n_matrix = delta_c_b_n_matrix * c_b_n_matrix
return est_c_b_n_matrix
# End of Initializing Attitude in NED
'''
------------------------------------------------------------
15. Progress Bar: Displays or Updates a Console Progress Bar
------------------------------------------------------------
'''
def progressbar(progress):
# Accepts "progress" as a float percentage between 0 and 1.
barlength = 25 # Modify this to change the length of the progress bar
status = " "
block = int(round(barlength * progress))
text = "\r NavSim: [{0}] {1}% {2}".format(">" * block + "-" * (barlength - block), int(round(progress * 100)),
status)
sys.stdout.write(text)
sys.stdout.flush()
# End of Progress Bar
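# Illustrative usage (sketch): update the bar once per simulation step, e.g.
#   for k in range(n_steps):
#       progressbar((k + 1) / float(n_steps))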
'''
----------------------------------------------------------
16. Calculate the Earth Gravitational Force Vector in ECEF
----------------------------------------------------------
'''
def gravity_ecef(r_eb_e):
# Calculate distance from center of the Earth
mag_r = np.sqrt(r_eb_e.T * r_eb_e)
# If the input position is [0,0,0], produce a dummy output
if mag_r == 0:
gravity_vec = np.matrix(np.zeros((3, 1)))
else:
# Calculate gravitational acceleration
gravity_vec = np.nan * np.matrix(np.ones((3, 1)))
gamma = np.nan * np.matrix(np.ones((3, 1)))
z_scale = 5.0 * (r_eb_e[2, 0] / mag_r) ** 2
gamma[0, 0] = (-mu / mag_r ** 3) * (r_eb_e[0, 0] + 1.5 * J_2 * (R_0 / mag_r) ** 2 * (1.0 - z_scale) *
                                    r_eb_e[0, 0])
gamma[1, 0] = (-mu / mag_r ** 3) * (r_eb_e[1, 0] + 1.5 * J_2 * (R_0 / mag_r) ** 2 * (1.0 - z_scale) *
                                    r_eb_e[1, 0])
gamma[2, 0] = (-mu / mag_r ** 3) * (r_eb_e[2, 0] + 1.5 * J_2 * (R_0 / mag_r) ** 2 * (3.0 - z_scale) *
                                    r_eb_e[2, 0])
# Add centripetal acceleration
gravity_vec[0:2, 0] = gamma[0:2, 0] + OMEGA_ie ** 2 * r_eb_e[0:2, 0]
gravity_vec[2, 0] = gamma[2, 0]
return gravity_vec
# End of Calculating Earth Gravitational Force in ECEF
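def _gravity_ecef_example():
    # Illustrative sketch (not part of the original module): evaluate the
    # gravity model roughly 500 m above the equator; the returned magnitude
    # should be close to 9.8 m/s^2. Assumes R_0 is the module-level
    # WGS-84 semi-major axis.
    r_example = np.matrix([[R_0 + 500.0], [0.0], [0.0]])
    return gravity_ecef(r_example)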
'''
-------------------------------------------
17. Earth Rotation Over the Update Interval
-------------------------------------------
'''
def c_earth(tau_i):
# Determine the Earth rotation over the update interval
alpha_ie = OMEGA_ie * tau_i
c_earth_matrix = np.matrix([[np.cos(alpha_ie), np.sin(alpha_ie), 0.0],
[-np.sin(alpha_ie), np.cos(alpha_ie), 0.0],
[0, 0, 1]])
return c_earth_matrix, alpha_ie
# End of Calculating the Earth Rotational Matrix
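# Illustrative usage: c_earth(0.01) returns the Earth-rotation CTM and
# alpha_ie = OMEGA_ie * 0.01 for a 10 ms update interval.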
'''
-----------------------------------------------------------------------
18. Solve Kepler's Equation for
<filename>apero/plotting/plot_functions.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2019-10-03 at 10:51
@author: cook
"""
import numpy as np
from astropy import constants as cc
from astropy import units as uu
import copy
import os
import warnings
from apero.core import constants
from apero.core import math as mp
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'core.plotting.plot_functions.py'
__INSTRUMENT__ = 'None'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# set up definition storage
definitions = []
# Speed of light
# noinspection PyUnresolvedReferences
speed_of_light_ms = cc.c.to(uu.m / uu.s).value
# noinspection PyUnresolvedReferences
speed_of_light = cc.c.to(uu.km / uu.s).value
# -----------------------------------------------------------------------------
# =============================================================================
# Define plotting class
# =============================================================================
class Graph:
def __init__(self, name, kind='debug', func=None, filename=None,
description=None, figsize=None, dpi=None):
self.name = name
# set kind
if kind in ['debug', 'summary', 'show']:
self.kind = kind
else:
self.kind = None
# set function
self.func = func
# storage of filename
self.filename = filename
# set the description
if description is None:
self.description = self.name
else:
self.description = str(description)
# set the figsize
if figsize is None:
self.figsize = (6.4, 4.8)
else:
self.figsize = figsize
# set the dots per inch
if dpi is None:
self.dpi = 100
else:
self.dpi = dpi
def copy(self):
"""
Make a copy of the Graph instance (don't ever want to set values to
the default ones defined below)
:return:
"""
name = copy.deepcopy(self.name)
# deep copy other parameters (deep copy as could be string or None)
kwargs = dict()
kwargs['kind'] = copy.deepcopy(self.kind)
kwargs['func'] = self.func
kwargs['filename'] = copy.deepcopy(self.filename)
kwargs['description'] = copy.deepcopy(self.description)
kwargs['figsize'] = copy.deepcopy(self.figsize)
kwargs['dpi'] = copy.deepcopy(self.dpi)
# return new instance
return Graph(name, **kwargs)
def set_filename(self, params, location, suffix=None):
"""
Set the file name for this Graph instance
:param params:
:param location:
:param suffix:
:return:
"""
# get pid
pid = params['PID']
# construct filename
filename = 'plot_{0}_{1}'.format(self.name, pid)
# make filename all lowercase
filename = filename.lower()
# deal with fiber
if suffix is not None:
filename += '_{0}'.format(suffix)
# construct absolute filename
self.filename = os.path.join(location, filename)
def set_figure(self, plotter, figsize=None, **kwargs):
# get plt from plotter (for matplotlib set up)
plt = plotter.plt
# get figure and frame
fig, frames = plt.subplots(**kwargs)
# set figure parameters
if figsize is None:
fig.set_size_inches(self.figsize)
else:
fig.set_size_inches(figsize)
# return figure and frames
return fig, frames
def set_grid(self, plotter, figsize=None, **kwargs):
# get plt from plotter (for matplotlib set up)
plt = plotter.plt
# get figure and frame
fig = plt.figure()
# get grid
gs = fig.add_gridspec(**kwargs)
# set figure parameters
if figsize is None:
fig.set_size_inches(self.figsize)
else:
fig.set_size_inches(figsize)
# return figure and frames
return fig, gs
class CrossCursor(object):
def __init__(self, frame, color='r', alpha=0.5):
self.frame = frame
# the horizontal line
self.lx = frame.axhline(color=color, alpha=alpha)
# the vertical line
self.ly = frame.axvline(color=color, alpha=alpha)
# set up the text box
bbox = dict(facecolor='white', edgecolor='blue', pad=5.0)
# text location in axes coords
self.txt = frame.text(0.8, 0.9, '', horizontalalignment='center',
verticalalignment='center', color='blue',
transform=frame.transAxes, bbox=bbox)
# start off the text without values
self.txt.set_text('x=NaN, y=NaN')
def mouse_move(self, event):
if not event.inaxes:
return
# get the new x and y locations
x, y = event.xdata, event.ydata
# update the line positions
self.lx.set_ydata(y)
self.ly.set_xdata(x)
# set the text
self.txt.set_text('x={0:.2f}, y={1:.2f}'.format(x, y))
# update canvas
self.frame.figure.canvas.draw()
class ClickCursor(object):
def __init__(self, fig, frame):
self.fig = fig
self.frame = frame
def mouse_click(self, event):
# noinspection PyProtectedMember
if self.fig.canvas.manager.toolbar._active:
return
if not event.inaxes:
return
# get the new x and y locations
x, y = event.xdata, event.ydata
# print the position of the cursor
print('PLOT x={0:.2f}, y={1:.2f}'.format(x, y))
# =============================================================================
# Define user graph functions
# =============================================================================
def ulegend(frame=None, plotter=None, **kwargs):
# deal with no frame set
if frame is None:
frame = plotter.plt.gca()
# get current legends labels and handles
all_h, all_l = frame.get_legend_handles_labels()
# storage
unique_h, unique_l = [], []
# loop around labels and only keep unique labels
for it, label in enumerate(all_l):
if label not in unique_l:
unique_l.append(label)
unique_h.append(all_h[it])
# plot legend
frame.legend(unique_h, unique_l, **kwargs)
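# Illustrative usage (sketch): after plotting several series that reuse the
# same label, ulegend(frame, loc='best') draws a legend with each label once.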
def mc_line(frame, plt, line, x, y, z, norm=None, cmap=None):
"""
Create a line coloured by vector "z"
From here:
https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/
multicolored_line.html
:param frame:
:param plt:
:param line:
:param x:
:param y:
:param z:
:param norm:
:param cmap:
:return:
"""
# deal with no colormap
if cmap is None:
cmap = 'viridis'
# Create a continuous norm to map from data points to colors
if norm is None:
norm = plt.Normalize(np.nanmin(z), np.nanmax(z))
# Create a set of line segments so that we can color them individually
# This creates the points as a N x 1 x 2 array so that we can stack points
# together easily to get the segments. The segments array for line
# collection needs to be (numlines) x (points per line) x 2 (for x and y)
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
# plot the segments
lc = line(segments, cmap=cmap, norm=norm)
# Set the values used for colormapping
lc.set_array(z)
# return the line
return frame.add_collection(lc)
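def _mc_line_example(frame, plt, x, y, z):
    # Illustrative sketch (not part of the original module): colour a curve by
    # a third vector z, passing matplotlib's LineCollection as the `line` class.
    from matplotlib.collections import LineCollection
    lc = mc_line(frame, plt, LineCollection, x, y, z, cmap='plasma')
    frame.autoscale()  # add_collection does not rescale the view by itself
    return lc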
def remove_first_last_ticks(frame, axis='x'):
if axis == 'x' or axis == 'both':
xticks = frame.get_xticks()
xticklabels = xticks.astype(str)
xticklabels[0], xticklabels[-1] = '', ''
frame.set_xticks(xticks)
frame.set_xticklabels(xticklabels)
if axis == 'y' or axis == 'both':
yticks = frame.get_yticks()
yticklabels = yticks.astype(str)
yticklabels[0], yticklabels[-1] = '', ''
frame.set_yticks(yticks)
frame.set_yticklabels(yticklabels)
return frame
def add_grid(frame):
frame.minorticks_on()
# Don't allow the axis to be on top of your data
frame.grid(which='major', linestyle='-', linewidth='0.5', color='black',
alpha=0.75, zorder=1)
frame.grid(which='minor', linestyle=':', linewidth='0.5', color='black',
alpha=0.5, zorder=0)
# =============================================================================
# Define test plotting functions
# =============================================================================
def graph_test_plot_1(plotter, graph, kwargs):
# ------------------------------------------------------------------
# start the plotting process
if not plotter.plotstart(graph):
return
# ------------------------------------------------------------------
# get the arguments from kwargs
x = kwargs['x']
y = kwargs['y']
colour = kwargs['colour']
# ------------------------------------------------------------------
# plot
fig, frame = graph.set_figure(plotter)
frame.plot(x, y, color=colour, label='test')
frame.legend(loc=0)
# ------------------------------------------------------------------
# wrap up using plotter
plotter.plotend(graph)
def graph_test_plot_2(plotter, graph, kwargs):
# ------------------------------------------------------------------
# start the plotting process
if not plotter.plotstart(graph):
return
# ------------------------------------------------------------------
# get the arguments from kwargs
orders = kwargs['ord']
x_arr = kwargs['x']
y_arr = kwargs['y']
colour = kwargs.get('colour', 'k')
# ------------------------------------------------------------------
# get the plot generator
generator = plotter.plotloop(orders)
# prompt to start looper
plotter.close_plots(loop=True)
# loop around the orders
for ord_num in generator:
fig, frame = graph.set_figure(plotter)
frame.plot(x_arr[ord_num], y_arr[ord_num], color=colour)
frame.set_title('Order {0}'.format(ord_num))
# update filename (adding order_num to end)
suffix = 'order{0}'.format(ord_num)
graph.set_filename(plotter.params, plotter.location, suffix=suffix)
# ------------------------------------------------------------------
# wrap up using plotter
plotter.plotend(graph)
# defined graphing instances
test_plot1 = Graph('TEST1', kind='summary', func=graph_test_plot_1,
description='This is a test plot',
figsize=(10, 10), dpi=150)
test_plot2 = Graph('TEST2', kind='debug', func=graph_test_plot_1)
test_plot3 = Graph('TEST3', kind='debug', func=graph_test_plot_2)
test_plot4 = Graph('TEST4', kind='summary', func=graph_test_plot_2)
# add to definitions
definitions += [test_plot1, test_plot2, test_plot3, test_plot4]
# =============================================================================
# Define dark plotting functions
# =============================================================================
def plot_dark_image_regions(plotter, graph, kwargs):
# ------------------------------------------------------------------
# start the plotting process
if not plotter.plotstart(graph):
return
# get plt
plt = plotter.plt
# get matplotlib rectangle
rectangle = plotter.matplotlib.patches.Rectangle
# ------------------------------------------------------------------
# get the arguments from kwargs
params = kwargs['params']
image = kwargs['image']
# get parameters from params
bxlow = params['IMAGE_X_BLUE_LOW']
bxhigh = params['IMAGE_X_BLUE_HIGH']
bylow = params['IMAGE_Y_BLUE_LOW']
byhigh = params['IMAGE_Y_BLUE_HIGH']
rxlow = params['IMAGE_X_RED_LOW']
rxhigh = params['IMAGE_X_RED_HIGH']
rylow = params['IMAGE_Y_RED_LOW']
ryhigh = params['IMAGE_Y_RED_HIGH']
med = kwargs['med']
# ------------------------------------------------------------------
# adjust for backwards limits
if bxlow > bxhigh:
bxlow, bxhigh = bxhigh - 1, bxlow - 1
if bylow > byhigh:
bylow, byhigh = byhigh - 1, bylow - 1
# adjust for backwards limits
if rxlow > rxhigh:
rxlow, rxhigh = rxhigh - 1, rxlow - 1
if rylow > ryhigh:
rylow, ryhigh = ryhigh - 1, rylow - 1
# ------------------------------------------------------------------
# set up plot
fig, frame = graph.set_figure(plotter)
# plot the image
clim = (0., 10 * med)
im = frame.imshow(image, origin='lower', clim=clim, cmap='viridis')
# plot blue rectangle
brec = rectangle((bxlow, bylow), bxhigh - bxlow, byhigh - bylow,
edgecolor='b', facecolor='None')
frame.add_patch(brec)
# plot red rectangle
rrec = rectangle((rxlow, rylow), rxhigh - rxlow, ryhigh - rylow,
edgecolor='r', facecolor='None')
frame.add_patch(rrec)
# add colorbar
plt.colorbar(im)
# set title
frame.set_title('Dark image with red and blue regions')
# ------------------------------------------------------------------
# wrap up using plotter
plotter.plotend(graph)
def plot_dark_histogram(plotter, graph, kwargs):
# ------------------------------------------------------------------
# start the plotting process
if not plotter.plotstart(graph):
| |
<filename>utils/extract_utility.py
"""Contains classes for running and working with extracts.
Classes:
ExtractObject
ValidateObject
MergeObject
FeatureExtractTool
FeatureMergeObject
Also contains misc. related functions
"""
import sys
import os
import errno
import warnings
import time
from numpy import isnan
import pandas as pd
# import geopandas as gpd
import fiona
from shapely.geometry import shape
from shapely.prepared import prep
from shapely.geos import TopologicalError
import pymongo
# sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
import geo_rasterstats as rs
# -----------------------------------------------------------------------------
def make_dir(path):
"""Make directory.
Args:
path (str): absolute path for directory
Raise error if error other than directory exists occurs.
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def str_to_range(value):
"""Generate year range based on year string range segment input.
Args:
value (str): year string range segment (see parse_year_string()
documentation for year string details)
Returns:
year range (List[int]):
"""
if not isinstance(value, str):
raise Exception("str_to_range: input must be str")
range_split = value.split(':')
if len(range_split) != 2:
raise Exception("str_to_range: result of split must be 2 items")
try:
start = int(range_split[0])
end = int(range_split[1]) + 1
except:
raise Exception("str_to_range: invalid years")
return range(start, end)
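# Example: str_to_range("1990:1993") returns range(1990, 1994), i.e. the years
# 1990, 1991, 1992 and 1993 inclusive.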
def parse_year_string(value):
"""Generate list of years based on year string input.
Years string are a simple method of specifying years and year ranges using
a single string.
Through parsing of the year string, a list of years is generated.
The 4 components of a year string are:
1) single years - simply notated using the 4 digit year (eg: 1995),
this add an individual year to the list
2) year ranges - ranges are two years separated by a colon
(eg: 1990:2000). year ranges add all years
starting with & including the first year and
going up to & including the last year to the
year list
3) negation - placing an exclamation point (!) in front of any
year or year range will remove the specified year
or year range from the list
4) separator - the separator or pipe (|) is used to separate each
portion of your year string
Year strings are parsed sequentially, meaning that each pipe separated
portion of the year string will be parse in order and will override any
previous segments. The resulting list is "positive" which means that only
accepted years are included (ie: a year string with only negations will
be empty.)
Examples:
- 1980|1990:1992 = ['1980', '1990', '1991', '1992']
- 1980:1982 = ['1980', '1981', '1982']
- 1980:1982|!1981 = ['1980', '1982']
- 1985:1987|!1980:1990 = []
Args:
value (str): year string (see above for details on year strings)
Returns:
year list (List[str]): list of strings generated based on year string
"""
statements = [x for x in str(value).split('|') if x != ""]
tmp_years = []
for i in statements:
if i.startswith('!'):
if ':' in i:
tmp_range = str_to_range(i[1:])
tmp_years = [y for y in tmp_years if y not in tmp_range]
else:
try:
year = int(i[1:])
except:
raise Exception("parse_year_string: invalid year")
tmp_years = [y for y in tmp_years if y != year]
else:
if ':' in i:
tmp_range = str_to_range(i)
tmp_years += tmp_range
else:
try:
year = int(i)
except:
raise Exception("parse_year_string: invalid year")
tmp_years += [year]
return map(str, tmp_years)
def get_years(value):
"""Get years.
Defines how to handle empty year strings
Args:
value (str): string which may be a year string or empty
"""
if value == None:
value = ""
statements = [x for x in str(value).split('|') if x != ""]
if len(statements) == 0:
tmp_years = map(str, range(1000,10000))
else:
tmp_years = parse_year_string(value)
return tmp_years
# -----------------------------------------------------------------------------
class ExtractObject():
"""Contains variables and functions needed to validate and run extracts.
Attributes (static):
_extract_options (List[str]): list of available extract options
_vector_extensions (List[str]): valid file extensions for vector files
_raster_extensions (List[str]): valid file extensions for raster files
Attributes (args):
_builder (bool): indicates whether ExtractObject is being called by
builder (prevents portions of code from being run
when extracts are not actually going to be run)
Attributes (variable):
_vector_path (str): path to vector file
_extract_type (str): selected extract type (mean, max, etc.)
_base_path (str): base path for datasets to be extracted
# default_years (List[int]): default year list to use when no years
are provided
_years (List[str]): list of years generated by parsing year string
_file_mask (str): file mask used to parse date information from
data file
_run_option (str): automatically generated. used to identify temporal
type of dataset (based on file mask)
# _raster_path (str): path to raster file
"""
# available extract types and associated identifiers
_extract_options = [
"categorical",
"mean",
"count",
"sum",
"min",
"max",
# "std",
"reliability",
"encoded",
"median"
# "majority"
# "minority"
# "unique"
# "range"
# "percentile_?"
# "custom_?"
# "var"
# "mode"
]
# accepted vector file extensions
_vector_extensions = [".geojson", ".shp"]
# accepted raster file extensions
_raster_extensions = [".tif", ".asc"]
def __init__(self, builder=False):
self._builder = builder
self._vector_path = None
self._extract_type = None
self._base_path = None
# self.default_years = range(1000, 10000)
self._years = []
self._file_mask = None
self._run_option = None
self._reliability = False
self._reliability_geojson = None
# self._raster_path = None
self.pixel_limit = 250000
def set_vector_path(self, value):
"""Set vector file path.
should this have more advanced vector checks? (ie load and test)
Args:
value (str): vector file path
"""
if not value.endswith(tuple(self._vector_extensions)):
raise Exception("invalid vector extension (" + value + ")")
if not os.path.isfile(value):
raise Exception("set_vector_path: vector does not exist " +
"(" + value + ")")
self._vector_path = value
# vector_dirname = os.path.dirname(value)
# vector_filename, vector_extension = os.path.splitext(
# os.path.basename(value))
def set_base_path(self, value):
"""Set data base path.
Args:
value (str): base path where year/day directories for processed
data are located
"""
# validate base_path
if not os.path.exists(value):
raise Exception("base_path is not valid ("+ value +")")
self._base_path = value
self._check_file_mask()
# self._check_reliability()
def _set_reliability(self, value):
if value == "reliability":
self._reliability = True
self._check_reliability()
else:
self._reliability = False
def _check_reliability(self):
"""Verify that files needed to run reliability exist.
"""
if self._reliability != False and self._base_path != None:
if os.path.isfile(self._base_path):
base_path_dir = os.path.dirname(self._base_path)
else:
base_path_dir = self._base_path
if not os.path.isfile(base_path_dir + "/unique.geojson"):
raise Exception("check_reliability: reliability geojson" +
" does not exist.")
else:
self._reliability_geojson = base_path_dir + "/unique.geojson"
# reliability geojson (mean surface features with aid info)
# self._rgeo = fiona.open(self._reliability_geojson)
def set_years(self, value):
"""Set years.
If year string is empty, accept all years found when searching
for data.
Args:
value (str): year string
"""
self._years = get_years(value)
def _check_file_mask(self):
"""Run general validation of file_mask based on base_path
Makes sure that:
1) temporally invariant file masks (ie "None") are not used with
a base path that indicates temporal data (ie base_path is directory)
2) temporal file masks (ie not "None") are not used with a base path
that indicates temporally invariant data (ie base_path is file)
"""
if (self._file_mask == "None" and self._base_path != None and
not self._base_path.endswith(tuple(self._raster_extensions))):
raise Exception("check_file_mask: invalid use of None file_mask " +
"based on base_path")
elif (self._file_mask not in [None, "None"] and
self._base_path != None and
self._base_path.endswith(tuple(self._raster_extensions))):
raise Exception("check_file_mask: invalid use of temporal " +
"file_mask based on base_path")
def set_file_mask(self, value):
"""Set file mask.
Args:
value (str): file mask
"""
if value == "None":
tmp_run_option = 1
elif "YYYY" in value:
if "MM" in value and not "DDD" in value:
tmp_run_option = 3
elif "DDD" in value and not "MM" in value:
tmp_run_option = 4
elif not "MM" in value and not "DDD" in value:
tmp_run_option = 2
else:
raise Exception("set_file_mask: ambiguous temporal string " +
"("+str(value)+")")
else:
raise Exception("set_file_mask: invalid file mask " +
"("+str(value)+")")
self._file_mask = value
self._run_option = str(tmp_run_option)
self._check_file_mask()
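    # Illustrative mapping (derived from the checks above): a file mask of
    # "None" selects run option 1 (temporally invariant data); a mask containing
    # only "YYYY" selects 2 (yearly); "YYYY" plus "MM" selects 3 (monthly);
    # "YYYY" plus "DDD" selects 4 (day-of-year).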
def set_extract_type(self, value, category_map=None):
"""Set extract type.
Args:
value (str): extract type
"""
# validate input extract type
if value not in self._extract_options:
raise Exception("invalid extract type (" + value + ")")
if value == "categorical":
if not isinstance(category_map, dict):
raise Exception("invalid category map (" +
str(category_map) + ")")
for k, v in category_map.items():
if not isinstance(v, (int, float)):
raise Exception("invalid category map value (" + str(v) +
") for key '" + str(k) + "'")
# swap because it is reverse in original json
# add ad_ prefix
if j == 0:
(art,) = axes.plot(fitx_, fity, "-b", label=label)
else:
(art,) = axes.plot(fitx_, fity, "-b")
if i == 0:
(art,) = axes.plot(
Knorm_bin_edges[j, i][:-1],
spe_cts_all[j, i],
"o",
label=str(time_steps[j]) + " ms",
)
else:
(art,) = axes.plot(
Knorm_bin_edges[j, i][:-1],
spe_cts_all[j, i],
"o",
)
axes.set_xlim(0, 3.5)
if ylim is not None:
axes.set_ylim(ylim)
# Annotate the best K and M values on the plot
axes.annotate(
r"%s" % txt,
xy=(1, 0.25),
xycoords="axes fraction",
fontsize=10,
horizontalalignment="right",
verticalalignment="bottom",
)
axes.set_title("Q " + "%.4f " % (q_ring_center[i]) + r"$\AA^{-1}$")
axes.legend(loc="best", fontsize=6)
# plt.show()
fig.tight_layout()
return M_val, K_val
def plot_xsvs_g2(g2, taus, res_pargs=None, *argv, **kwargs):
"""plot g2 results,
g2: one-time correlation function
taus: the time delays
res_pargs, a dict, can contains
uid/path/qr_center/qz_center/
kwargs: can contains
vlim: [vmin,vmax]: for the plot limit of y, the y-limit will be [vmin * min(y), vmx*max(y)]
ylim/xlim: the limit of y and x
e.g.
plot_gisaxs_g2( g2b, taus= np.arange( g2b.shape[0]) *timeperframe, q_ring_center = q_ring_center, vlim=[.99, 1.01] )
"""
if res_pargs is not None:
uid = res_pargs["uid"]
path = res_pargs["path"]
q_ring_center = res_pargs["q_ring_center"]
else:
if "uid" in kwargs.keys():
uid = kwargs["uid"]
else:
uid = "uid"
if "q_ring_center" in kwargs.keys():
q_ring_center = kwargs["q_ring_center"]
else:
q_ring_center = np.arange(g2.shape[1])
if "path" in kwargs.keys():
path = kwargs["path"]
else:
path = ""
num_rings = g2.shape[1]
sx = int(round(np.sqrt(num_rings)))
if num_rings % sx == 0:
sy = int(num_rings / sx)
else:
sy = int(num_rings / sx + 1)
# print (num_rings)
if num_rings != 1:
# fig = plt.figure(figsize=(14, 10))
fig = plt.figure(figsize=(12, 10))
plt.axis("off")
# plt.axes(frameon=False)
# print ('here')
plt.xticks([])
plt.yticks([])
else:
fig = plt.figure(figsize=(8, 8))
plt.title("uid= %s" % uid, fontsize=20, y=1.06)
for i in range(num_rings):
ax = fig.add_subplot(sx, sy, i + 1)
ax.set_ylabel("beta")
ax.set_title(" Q= " + "%.5f " % (q_ring_center[i]) + r"$\AA^{-1}$")
y = g2[:, i]
# print (y)
ax.semilogx(taus, y, "-o", markersize=6)
# ax.set_ylim([min(y)*.95, max(y[1:])*1.05 ])
if "ylim" in kwargs:
ax.set_ylim(kwargs["ylim"])
elif "vlim" in kwargs:
vmin, vmax = kwargs["vlim"]
ax.set_ylim([min(y) * vmin, max(y[1:]) * vmax])
else:
pass
if "xlim" in kwargs:
ax.set_xlim(kwargs["xlim"])
dt = datetime.now()
CurTime = "%s%02d%02d-%02d%02d-" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
fp = path + "g2--uid=%s" % (uid) + CurTime + ".png"
fig.savefig(fp, dpi=fig.dpi)
fig.tight_layout()
# plt.show()
###########################
#
def nbinomlog(p, hist, x, N):
"""Residuals for maximum likelihood fit to nbinom distribution.
Vary both M (shape parameter) and mu (count rate) (using leastsq)."""
mu, M = p
mu = abs(mu)
M = abs(M)
w = np.where(hist > 0.0)
Np = N * st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M))
err = 2 * (Np - hist)
err[w] = err[w] - 2 * hist[w] * np.log(Np[w] / hist[w]) # note: sum(Np-hist)==0
return np.sqrt(np.abs(err))
# return err
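# Added note: err above has the form of a per-bin deviance,
# 2*(Np - hist) + 2*hist*log(hist/Np), so returning sqrt(abs(err)) makes
# leastsq minimize sum(abs(err)), i.e. the total deviance, which
# approximates a maximum-likelihood fit of the negative binomial model.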
def nbinomlog1(p, hist, x, N, mu):
"""Residuals for maximum likelihood fit to nbinom distribution.
Vary M (shape parameter) while mu (count rate) is held fixed (using leastsq)."""
M = abs(p[0])
w = np.where(hist > 0.0)
Np = N * st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M))
err = 2 * (Np - hist)
err[w] = err[w] - 2 * hist[w] * np.log(Np[w] / hist[w]) # note: sum(Np-hist)==0
return np.sqrt(np.abs(err))
def nbinomlog1_notworknow(p, hist, x, N, mu):
"""Residuals for maximum likelihood fit to nbinom distribution.
Vary M (shape parameter) while mu (count rate) is held fixed (using leastsq)."""
M = abs(p[0])
w = np.where(hist > 0.0)
Np = N * st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M))
err = 2 * (Np - hist)
err[w] = err[w] - 2 * hist[w] * np.log(Np[w] / hist[w]) # note: sum(Np-hist)==0
# return np.sqrt(err)
return err
def nbinomres(p, hist, x, N):
""" residuals to leastsq() to fit normal chi-square"""
mu, M = p
Np = N * st.nbinom.pmf(x, M, 1.0 / (1.0 + mu / M))
err = (hist - Np) / np.sqrt(Np)
return err
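# Minimal usage sketch for the residual functions above (added; variable
# names are illustrative, not part of the original code):
#   from scipy.optimize import leastsq
#   p0 = [K_guess, M_guess]           # initial mu (count rate) and M (shape)
#   fit = leastsq(nbinomlog, p0, args=(hist, bin_centers, N), full_output=1)
#   mu_fit, M_fit = abs(fit[0][0]), abs(fit[0][1])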
def get_xsvs_fit(
spe_cts_all,
K_mean,
varyK=True,
max_bins=None,
qth=None,
g2=None,
times=None,
taus=None,
):
"""
Fit the XSVS results with a negative binomial distribution using maximum-likelihood chi-squares
"""
max_cts = spe_cts_all[0][0].shape[0] - 1
num_times, num_rings = spe_cts_all.shape
if max_bins is not None:
num_times = min(num_times, max_bins)
bin_edges, bin_centers, Knorm_bin_edges, Knorm_bin_centers = get_bin_edges(
num_times, num_rings, K_mean, int(max_cts + 2)
)
if g2 is not None:
g2c = g2.copy()
g2c[0] = g2[1]
ML_val = {}
KL_val = {}
K_ = []
if qth is not None:
range_ = range(qth, qth + 1)
else:
range_ = range(num_rings)
for i in range_:
N = 1
ML_val[i] = []
KL_val[i] = []
if g2 is not None:
mi_g2 = 1 / (g2c[:, i] - 1)
m_ = np.interp(times, taus, mi_g2)
for j in range(num_times):
x_, x, y = (
bin_edges[j, i][:-1],
Knorm_bin_edges[j, i][:-1],
spe_cts_all[j, i],
)
if g2 is not None:
m0 = m_[j]
else:
m0 = 10
# resultL = minimize(nbinom_lnlike, [K_mean[i] * 2**j, m0], args=(x_, y) )
# the normal leastsq
# result_n = leastsq(nbinomres, [K_mean[i] * 2**j, m0], args=(y,x_,N),full_output=1)
# not vary K
if not varyK:
resultL = leastsq(
nbinomlog1,
[m0],
args=(y, x_, N, K_mean[i] * 2 ** j),
ftol=1.49012e-38,
xtol=1.49012e-38,
factor=100,
full_output=1,
)
ML_val[i].append(abs(resultL[0][0]))
KL_val[i].append(K_mean[i] * 2 ** j) # resultL[0][0] )
else:
# vary M and K
resultL = leastsq(
nbinomlog,
[K_mean[i] * 2 ** j, m0],
args=(y, x_, N),
ftol=1.49012e-38,
xtol=1.49012e-38,
factor=100,
full_output=1,
)
ML_val[i].append(abs(resultL[0][1]))
KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] )
# print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j )
if j == 0:
K_.append(KL_val[i][0])
return ML_val, KL_val, np.array(K_)
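# Example call (added sketch; shapes follow the arrays used above):
#   ML_val, KL_val, K_fit = get_xsvs_fit(spe_cts_all, K_mean,
#                                        varyK=True, max_bins=2)
#   # ML_val[i][j], KL_val[i][j]: fitted M and K for q-ring i at bin level j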
def plot_xsvs_fit(
spe_cts_all,
ML_val,
KL_val,
K_mean,
xlim=[0, 15],
ylim=[1e-8, 1],
q_ring_center=None,
uid="uid",
qth=None,
times=None,
fontsize=3,
):
fig = plt.figure(figsize=(9, 6))
plt.title(
"uid= %s" % uid + " Fitting with Negative Binomial Function",
fontsize=20,
y=1.02,
)
plt.axes(frameon=False)
plt.xticks([])
plt.yticks([])
max_cts = spe_cts_all[0][0].shape[0] - 1
num_times, num_rings = spe_cts_all.shape
bin_edges, bin_centers, Knorm_bin_edges, Knorm_bin_centers = get_bin_edges(
num_times, num_rings, K_mean, int(max_cts + 2)
)
if qth is not None:
range_ = range(qth, qth + 1)
num_times = len(ML_val[qth])
else:
range_ = range(num_rings)
num_times = len(ML_val[0])
# for i in range(num_rings):
sx = int(round(np.sqrt(len(range_))))
if len(range_) % sx == 0:
sy = int(len(range_) / sx)
else:
sy = int(len(range_) / sx + 1)
n = 1
for i in range_:
axes = fig.add_subplot(sx, sy, n)
axes.set_xlabel("K/<K>")
axes.set_ylabel("P(K)")
n += 1
for j in range(num_times):
# print( i, j )
x_, x, y = (
bin_edges[j, i][:-1],
Knorm_bin_edges[j, i][:-1],
spe_cts_all[j, i],
)
# Using the best K and M values interpolate and get more values for fitting curve
xscale = bin_edges[j, i][:-1][1] / Knorm_bin_edges[j, i][:-1][1]
fitx = np.linspace(0, max_cts * 2 ** j, 5000)
fitx_ = fitx / xscale
# fity = nbinom_dist( fitx, K_val[i][j], M_val[i][j] )
fitL = nbinom_dist(fitx, KL_val[i][j], ML_val[i][j])
if j == 0:
(art,) = axes.semilogy(fitx_, fitL, "-r", label="nbinom_L")
# art, = axes.semilogy( fitx_,fity, '--b', label="nbinom")
else:
(art,) = axes.plot(fitx_, fitL, "-r")
# art, = axes.plot( fitx_,fity, '--b')
if i == 0:
if times is not None:
label = str(times[j] * 1000) + " ms"
else:
label = "Bin_%s" % (2 ** j)
(art,) = axes.plot(x, y, "o", label=label)
else:
(art,) = axes.plot(
x,
y,
"o",
)
axes.set_xlim(xlim)
axes.set_ylim(ylim)
axes.set_title("Q=" + "%.4f " % (q_ring_center[i]) + r"$\AA^{-1}$")
axes.legend(loc="best", fontsize=fontsize)
# plt.show()
fig.tight_layout()
def get_max_countc(FD, labeled_array):
"""Compute the max intensity of ROIs in the compressed file (FD)
Parameters
----------
FD: Multifile class
compressed file
labeled_array : array
labeled array; 0 is background.
Each ROI is represented by a nonzero integer. It is not required that
the ROI labels are contiguous
Returns
-------
max_intensity : float
the maximum intensity found within the ROIs
"""
qind, pixelist = roi.extract_label_indices(labeled_array)
timg = np.zeros(FD.md["ncols"] * FD.md["nrows"], dtype=np.int32)
timg[pixelist] = np.arange(1, len(pixelist) + 1)
if labeled_array.shape != (FD.md["ncols"], FD.md["nrows"]):
raise ValueError(
" `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)"
% (
FD.md["ncols"],
FD.md["nrows"],
labeled_array.shape[0],
labeled_array.shape[1],
)
)
max_inten = 0
for i in tqdm(
range(FD.beg, FD.end, 1), desc="Get max intensity of
import re
import math
import torch
import torchaudio
import numpy as np
import argparse
import librosa
import warnings
import itertools as it
from jiwer import wer
from tqdm import tqdm
import pandas as pd
import os
try:
from flashlight.lib.sequence.criterion import CpuViterbiPath, get_data_ptr_as_bytes
from flashlight.lib.text.dictionary import create_word_dict, load_words
from flashlight.lib.text.decoder import (
CriterionType,
LexiconDecoderOptions,
KenLM,
LM,
LMState,
SmearingMode,
Trie,
LexiconDecoder,
)
from flashlight.lib.text.decoder import LexiconFreeDecoder, LexiconFreeDecoderOptions
except ImportError:
warnings.warn(
"flashlight python bindings are required to use this KenLM. Please install from https://github.com/facebookresearch/flashlight/tree/master/bindings/python"
)
LM = object
LMState = object
from datasets import load_dataset, load_metric, concatenate_datasets
from transformers import Wav2Vec2CTCTokenizer
from transformers import Wav2Vec2FeatureExtractor
from transformers import Wav2Vec2Processor
from transformers import Wav2Vec2ForCTC
from utils.generic_utils import load_config, load_vocab, calculate_wer
from utils.dataset_preprocessed import remove_extra_columns, parse_dataset_dict, vocab_to_string, DataColletor
from torch.utils.data import DataLoader
from transformers import GPT2LMHeadModel, GPT2TokenizerFast
# BERT
from transformers import BertTokenizer, BertForMaskedLM
def score_fun_linear(s1, s2, w1=1, w2=1):
return s1*w1 + s2*w2
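# Added note: the rescorer combines a negated KenLM score with an external
# LM perplexity through this linear function, e.g.
#   score_fun_linear(s1=-12.3, s2=45.0, w1=1, w2=1)  ->  32.7
# and the candidate with the lowest combined score wins
# (see Scorer.chose_best_candidate below).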
class Scorer:
def __init__(self, model_name, kenLM_weight=1, externalLM_weight=1, score_fn=score_fun_linear):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.tokenizer = GPT2TokenizerFast.from_pretrained(model_name)
self.model = GPT2LMHeadModel.from_pretrained(model_name).to(self.device)
self.model.eval()
self.kenLM_weight = kenLM_weight
self.externalLM_weight = externalLM_weight
self.score_fn = score_fn
print('---->>> Testing Model.')
self.test_model(['a capital da frança é paris', 'a capital da franca é paris', 'a capital da frança é parir'])
print('---->>> Done testing model')
def calculate_perplexity(self, text):
encodings = self.tokenizer(text, return_tensors="pt")
max_length = self.model.config.n_positions
# stride = 1
stride = 512
lls = []
for i in (range(0, encodings.input_ids.size(1), stride)):
begin_loc = max(i + stride - max_length, 0)
end_loc = min(i + stride, encodings.input_ids.size(1))
trg_len = end_loc - i
input_ids = encodings.input_ids[:,
begin_loc:end_loc].to(self.device)
target_ids = input_ids.clone()
target_ids[:, :-trg_len] = -100
with torch.no_grad():
outputs = self.model(input_ids, labels=target_ids)
log_likelihood = outputs[0] * trg_len
if not math.isnan(log_likelihood):
lls.append(log_likelihood)
ppl = torch.exp(torch.stack(lls).sum() / end_loc).item()
return ppl
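# Added note: the loop above computes perplexity with a strided sliding
# window, a common approximation for long inputs: in each window only the
# last trg_len tokens contribute to the loss (earlier positions are masked
# with -100), the per-window negative log-likelihoods are summed, and the
# result is exponentiated over the total token count. With stride=512 and
# short ASR candidates this usually reduces to a single window.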
def test_model(self, candidates):
for candidate in candidates:
ppl = self.calculate_perplexity(candidate)
print("{0} ---- {1}".format(candidate, ppl))
def chose_best_candidate(self, candidates, candidate_scores):
best_candidate = None
best_candidate_id = None
best_score = float('inf')
for i in range(len(candidates)):
# multiply by -1 because in KenLM a higher score is better
kenlm_score = candidate_scores[i] * -1
candidate = candidates[i]
external_lm_score = self.calculate_perplexity(candidate)
new_score = self.score_fn(kenlm_score, external_lm_score, self.kenLM_weight, self.externalLM_weight)
# print(candidate, "-->", new_score)
if new_score < best_score:
best_candidate = candidate
best_candidate_id = i
best_score = new_score
return (best_candidate_id, best_candidate, best_score)
def remove_invalid_characters(batch):
text = batch[text_column].lower()
text = re.sub("[^{}]".format(vocab_string), " ", text)
text = re.sub("[ ]+", " ", text)
batch[text_column] = text + " "
return batch
def load_audio(batch):
if dataset_base_path:
batch[audio_path_column] = os.path.join(dataset_base_path, batch[audio_path_column])
speech_array, sampling_rate = torchaudio.load(batch[audio_path_column])
batch["speech"] = speech_array.squeeze().numpy()
batch["sampling_rate"] = sampling_rate
if text_column in batch:
batch["target_text"] = batch[text_column]
return batch
def resample_audio(batch):
if batch["sampling_rate"] != config['sampling_rate']:
batch["speech"] = librosa.resample(np.asarray(batch["speech"]), batch["sampling_rate"], config['sampling_rate'])
batch["sampling_rate"] = config['sampling_rate']
return batch
def prepare_dataset(batch):
batch['audio_path'] = batch[audio_path_column]
batch["input_values"] = processor(batch["speech"], sampling_rate=config['sampling_rate']).input_values
if "target_text" in batch:
with processor.as_target_processor():
batch["labels"] = processor(batch["target_text"]).input_ids
return batch
class KenLMDecoder(object):
def __init__(self, kenlm_args, vocab_dict, rescore_args=None, blank="<pad>", silence="|", unk="<unk>"):
self.vocab_size = len(vocab_dict)
self.blank_token = (vocab_dict[blank])
self.silence_token = vocab_dict[silence]
self.unk_token = vocab_dict[unk]
self.nbest = kenlm_args['nbest']
if kenlm_args['lexicon_path']:
vocab_keys = vocab_dict.keys()
self.lexicon = load_words(kenlm_args['lexicon_path'])
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index(unk)
self.lm = KenLM(kenlm_args['kenlm_model_path'], self.word_dict)
self.trie = Trie(self.vocab_size, self.silence_token)
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = []
for token in spelling:
if token.upper() in vocab_keys:
spelling_idxs.append(vocab_dict[token.upper()])
elif token.lower() in vocab_keys:
spelling_idxs.append(vocab_dict[token.lower()])
else:
print("WARNING: The token", token, "not exist in your vocabulary, using <unk> token instead")
spelling_idxs.append(self.unk_token)
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=kenlm_args['beam'],
beam_size_token=kenlm_args['beam_size_token'] if "beam_size_token" in kenlm_args else len(vocab_dict),
beam_threshold=kenlm_args['beam_threshold'],
lm_weight=kenlm_args['lm_weight'],
word_score=kenlm_args['word_score'],
unk_score=-math.inf,
sil_score=kenlm_args['sil_weight'],
log_add=False,
criterion_type=CriterionType.CTC,
)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence_token,
self.blank_token,
self.unk_word,
[],
False,
)
else:
d = {w: [[w]] for w in vocab_dict.keys()}
self.word_dict = create_word_dict(d)
self.lm = KenLM(kenlm_args['kenlm_model_path'], self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=kenlm_args['beam'],
beam_size_token=kenlm_args['beam_size_token'] if "beam_size_token" in kenlm_args else len(vocab_dict),
beam_threshold=kenlm_args['beam_threshold'],
lm_weight=kenlm_args['lm_weight'],
sil_score=kenlm_args['sil_weight'],
log_add=False,
criterion_type=CriterionType.CTC,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence_token, self.blank_token, []
)
def get_tokens(self, idxs):
"""Normalize tokens by handling CTC blank"""
idxs = (g[0] for g in it.groupby(idxs))
idxs = filter(lambda x: x != self.blank_token, idxs)
return torch.LongTensor(list(idxs))
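# Added example of the normalization above (token ids are illustrative;
# the real blank id is vocab_dict["<pad>"]):
#   blank = 0
#   [5, 5, 0, 0, 7, 7, 7, 0, 5]  ->  [5, 7, 5]
# consecutive repeats are collapsed first, then blank tokens are dropped.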
def decode(self, emissions):
B, T, N = emissions.size()
# print(emissions.shape)
tokens = []
scores = []
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
tokens_nbest = []
scores_nbest = []
for result in nbest_results:
tokens_nbest.append(result.tokens)
scores_nbest.append(result.score)
tokens.append(tokens_nbest)
scores.append(scores_nbest)
token_array = np.array(tokens, dtype=object)
scores_array = np.array(scores, dtype=object)
return token_array, scores_array
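# Minimal decoding sketch (added; mirrors the usage in test() further below,
# assuming `logits` is shaped [batch, time, vocab]):
#   log_probs = torch.nn.functional.log_softmax(logits.float(), dim=-1)
#   lm_tokens, lm_scores = kenlm.decode(log_probs.cpu().detach())
#   best = lm_tokens[0][0]                 # best hypothesis for sample 0
#   text = processor.batch_decode([best])[0]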
def test(model, test_dataset, processor, kenlm, calcule_wer=True, return_predictions=False):
model.eval()
predictions = []
tot_samples = 0
tot_wer = 0
tot_cer = 0
with torch.no_grad():
for batch in tqdm(test_dataset):
input_values, attention_mask = batch['input_values'], batch['attention_mask']
if calcule_wer:
labels = batch['labels']
if USE_CUDA:
input_values = input_values.cuda(non_blocking=True)
attention_mask = attention_mask.cuda(non_blocking=True)
if calcule_wer:
labels = labels.cuda(non_blocking=True)
logits = model(input_values, attention_mask=attention_mask).logits
if kenlm:
logits = torch.nn.functional.log_softmax(logits.float(), dim=-1)
# get all candidates
lm_tokens, lm_scores = kenlm.decode(logits.cpu().detach())
# choose the best candidate
if rescore_lm:
pred_ids = []
for b in range(logits.size(0)):
candidates_ids = []
scores = []
for c in range(len(lm_tokens[b])):
candidate = lm_tokens[b][c]
score = lm_scores[b][c]
candidates_ids.append(candidate)
scores.append(score)
candidates_text = processor.batch_decode(candidates_ids)
# if less than 3 tokens, ignore rescore
if len(candidates_text[0].split(' ')) < 3:
# use the best kenLM candidate
pred_id = candidates_ids[0]
else:
best_candidate_id, _, _ = rescore_lm.chose_best_candidate(candidates_text, scores)
pred_id = candidates_ids[best_candidate_id]
pred_ids.append(pred_id)
else:
pred_ids = []
for b in range(logits.size(0)):
pred_ids.append(lm_tokens[b][0])
pred_ids = np.array(pred_ids)
else:
pred_ids = np.argmax(logits.cpu().detach().numpy(), axis=-1)
if calcule_wer:
# compute metrics
wer, cer = calculate_wer(pred_ids, labels.cpu().detach().numpy(), processor)
tot_wer += wer
tot_cer += cer
if return_predictions:
audios_path = batch['audio_path']
# get text
pred_string = processor.batch_decode(pred_ids)
for i in range(len(audios_path)):
output_wav_path = audios_path[i]
if dataset_base_path:
output_wav_path = output_wav_path.replace(dataset_base_path, '').replace(dataset_base_path+'/', '')
predictions.append([output_wav_path, pred_string[i].lower()])
tot_samples += input_values.size(0)
if calcule_wer:
# calculate avg of metrics
avg_wer = tot_wer/tot_samples
avg_cer = tot_cer/tot_samples
print("\n\n --> TEST PERFORMANCE\n")
print(" | > : WER ({:.5f})\n".format(avg_wer))
print(" | > : CER ({:.5f})\n".format(avg_cer))
return predictions
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config_path', type=str, required=True,
help="json file with configurations")
parser.add_argument('--checkpoint_path_or_name', type=str, required=True,
help="path or name of checkpoints")
parser.add_argument('--no_use_kenlm', default=False, action='store_true',
help="Not use KenLm during inference ?")
parser.add_argument('--rescore', default=False, action='store_true',
help="Use a external LM to rescore?")
parser.add_argument('--audio_path', type=str, default=None,
help="If it's passed the inference will be done in all audio files in this path and the dataset present in the config json will be ignored")
parser.add_argument('--output_csv', type=str, default=None,
help="CSV for save all predictions")
args = parser.parse_args()
config = load_config(args.config_path)
# Use CUDA
USE_CUDA = torch.cuda.is_available()
model = Wav2Vec2ForCTC.from_pretrained(args.checkpoint_path_or_name)
feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=config['sampling_rate'], padding_value=0.0, do_normalize=True, return_attention_mask=True)
processor = Wav2Vec2Processor.from_pretrained(args.checkpoint_path_or_name)
vocab_dict = processor.tokenizer.get_vocab()
pad_token = processor.tokenizer.pad_token
silence_token = processor.tokenizer.word_delimiter_token
unk_token = processor.tokenizer.unk_token
# if the model uses uppercase tokens in its vocab, force the tokenizer to lower case for compatibility with our data loader
if list(vocab_dict.keys())[-1].isupper():
processor.tokenizer.do_lower_case = True
data_collator = DataColletor(processor=processor, padding=True, test=True)
if USE_CUDA:
model = model.cuda()
if not args.no_use_kenlm:
print("> Inference using KenLM")
kenlm = KenLMDecoder(config.KenLM, vocab_dict, blank=pad_token, silence=silence_token, unk=unk_token)
else:
print("> Inference without KenLM")
kenlm = None
if args.rescore:
rescore_args = config.rescore if "rescore" in config.keys() else None
if rescore_args:
print("> Inference with External LM rescoring")
rescore_lm = Scorer(rescore_args['lm_path_or_name'], kenLM_weight=rescore_args['KenLM_weight'], externalLM_weight=rescore_args['ExternalLM_weight'])
else:
print("> Inference without External LM rescoring")
rescore_lm = None
else:
print("> Inference without External LM rescoring")
rescore_lm = None
if not args.audio_path:
# load dataset
test_dataset_config = config.datasets['test']
text_column, audio_path_column = parse_dataset_dict(test_dataset_config)
dataset = load_dataset(**test_dataset_config)
# compatibility with CSV loading
if isinstance(dataset, dict) and 'train' in dataset.keys():
concat_list = []
for k in dataset.keys():
concat_list.append(dataset[k])
dataset = concatenate_datasets(concat_list)
if 'files_path' in config['datasets'].keys() and config.datasets['files_path']:
if test_dataset_config['name'].lower() == 'csv':
dataset_base_path = config.datasets['files_path']
else:
print("> Warning: datasets['files_path'] igonored because dataset is not CSV !")
dataset_base_path = None
else:
dataset_base_path = None
# preprocess dataset
dataset = remove_extra_columns(dataset, text_column, audio_path_column)
vocab_string = vocab_to_string(vocab_dict, pad_token, silence_token, unk_token).lower()
print("\n\n> Remove invalid chars \n\n")
# remove invalid chars
dataset = dataset.map(remove_invalid_characters, num_proc=config['num_loader_workers'])
# Load audio files
dataset = dataset.map(load_audio)
print("\n\n> Resample Audio Files \n\n")
# resample audio files if necessary
dataset = dataset.map(resample_audio, num_proc=config['num_loader_workers'])
print("\n\n> Prepare dataset \n\n")
# batched dataset
dataset = dataset.map(prepare_dataset, remove_columns=dataset.column_names, batch_size=config['batch_size'], num_proc=1, batched=True)
test_dataset = DataLoader(dataset=dataset,
batch_size=config['batch_size'],
collate_fn=data_collator,
shuffle=True,
num_workers=config['num_loader_workers'])
print("\n\n> Starting Evaluation | |
#coding:utf-8
#
# id: bugs.core_0461
# title: JOIN including a complex view kills the server
# decription:
# NB: all versions of 2.1 and 2.5 fail on 2nd query (issue 2002-jul-12) with message about
# "too many contexts, max = 256" so this test checks only FB 3.0 and above.
#
# tracker_id: CORE-0461
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
set bail on;
create domain d_global_id as varchar(15) not null ;
create domain d_long_desc as varchar(200);
create domain d_group as integer default 0 check ((value is not null));
create domain d_global_ref as varchar(15);
create domain d_icon as smallint check (((value is null) or (value between 0 and 8)));
recreate table knowledgestreams (
stream_id d_global_id not null,
name d_long_desc,
content_groups d_group,
constraint pk_knowledgestreams primary key (stream_id)
);
recreate table mainmenu (
menu_id d_global_id not null,
parent_id d_global_ref,
description d_long_desc,
content_group d_group not null,
icon d_icon,
constraint pk_mainmenu primary key (menu_id)
);
alter table mainmenu add constraint fk_mainmenu foreign key (parent_id)
references mainmenu(menu_id) on delete cascade on update cascade;
recreate table menu_groups (
menu_id d_global_id not null,
content_id d_global_id not null
);
create index menu_groups_idx1 on menu_groups (menu_id);
create index menu_groups_idx2 on menu_groups (content_id);
recreate table streammenu (
stream_id d_global_id not null,
parent d_global_id not null,
constraint pk_streammenu primary key (parent, stream_id)
);
alter table streammenu add constraint fk_streammenu_parent foreign key
(parent) references mainmenu(menu_id) on delete cascade;
alter table streammenu add constraint fk_streammenu_stream_id foreign
key (stream_id) references knowledgestreams(stream_id) on delete
cascade;
create view fullmenu (
code,
parent,
description,
link,
content_group
) as
select menu_id,parent_id,description,cast(null as
varchar(100)),content_group from mainmenu
union all
select m.stream_id, m.parent, s.name
,cast('/servlets/uk.co.wmeng.intelus.knowledgestream?action=display&id='
|| s.stream_id as varchar(100)),content_groups from streammenu m join
knowledgestreams s on s.stream_id = m.stream_id
;
-------------------------------------------------
create table drzava
(
pozivnibrojdrzave varchar(4) not null,
nazivdrzave varchar(20),
grupa integer not null,
primary key (pozivnibrojdrzave)
);
create table log
(
broj varchar(25) not null,
pocetak timestamp not null,
trajanje integer not null,
lokal integer,
linija integer,
cena numeric(8,2) not null
);
create table lokal
(
brojlokala integer not null,
nazivlokala varchar(25) not null,
primary key (brojlokala)
);
create table mesni
(
ptt char(5) not null,
lokalniprefix varchar(5) not null,
primary key (ptt, lokalniprefix)
);
create table mreza
(
brojmreze varchar(4) not null,
pozivnibroj varchar(4) not null,
zona integer not null,
primary key (brojmreze, pozivnibroj)
);
create table vrstart
(
sifravrt char(7) not null,
nazivvrt varchar(30) not null,
jm varchar(6),
primary key (sifravrt)
);
create table poslovnica
(
sifraposlovnice char(2) not null,
nazivposlovnice varchar(18) not null,
primary key (sifraposlovnice)
);
create table rezijskitrosak
(
rednibroj integer not null,
datumtroska timestamp not null,
sifraposlovnice char(2) not null references
poslovnica (sifraposlovnice) on update cascade,
sifravrt char(7) not null references vrstart
(sifravrt) on update cascade,
kolicina decimal(8,2),
iznos decimal(8,2) not null,
primary key (rednibroj)
);
create generator gen_rt_id;
set generator gen_rt_id to 0;
create table vrstamt
(
sifravmt char(7) not null,
nazivvmt varchar(30) not null,
defaultjm varchar(6),
primary key (sifravmt)
);
create table roba
(
sifrarobe char(6) not null,
vrstarobe char(7) not null references vrstamt
(sifravmt) on update cascade,
nazivrobe varchar(30) not null,
jm varchar(6) not null,
barcode varchar(50),
pakovanje integer,
napomena varchar(100),
primary key (sifrarobe)
);
create table mesto
(
ptt char(5) not null,
nazivmesta varchar(40) not null,
pozivnibroj char(4),
primary key (ptt)
);
create table komitent
(
sifrakomitenta integer not null,
naziv varchar(25) not null ,
ptt char(5) not null references mesto
(ptt) on update cascade,
napomena varchar(100),
owner char(8),
primary key (sifrakomitenta)
);
create generator gen_komitent_id;
set generator gen_komitent_id to 0;
create table vrstadetalja
(
sifravd integer not null,
opisvd varchar(15),
telefon char(1),
check (telefon is null or telefon = 'd' or telefon = 'z'),
primary key(sifravd)
);
create generator gen_vrstadetalja_id;
set generator gen_vrstadetalja_id to 0;
create table komitentdetaljno
(
sifrakd integer not null,
sifrakomitenta integer not null references komitent
(sifrakomitenta) on update cascade on delete
cascade,
sifravd integer not null references
vrstadetalja (sifravd) on update cascade,
podatak varchar(40) not null,
cistbroj varchar(25),
primary key(sifrakd)
);
create generator gen_komitentdetaljno_id;
set generator gen_komitentdetaljno_id to 0;
create table prijem
(
brdok integer not null,
datumulaza timestamp not null,
sifrakomitenta integer references komitent
(sifrakomitenta) on update cascade,
primary key (brdok)
);
create generator gen_prij_id;
set generator gen_prij_id to 0;
create table prijemst
(
brdok integer not null references prijem
(brdok) on update cascade on delete cascade,
sifrarobe char(6) not null references roba
(sifrarobe) on update cascade,
kolicina decimal(8,2) not null,
cena decimal(8,2) not null,
primary key (brdok, sifrarobe)
);
create table alokacija
(
brdok integer not null,
datum timestamp not null,
sifraposlovnice char(2) not null references poslovnica
(sifraposlovnice) on update cascade,
primary key (brdok)
);
create generator gen_alok_id;
set generator gen_alok_id to 1;
create table alokacijast
(
brdok integer not null references alokacija
(brdok) on update cascade on delete cascade,
sifrarobe char(6) not null references roba
(sifrarobe) on update cascade,
kolicina decimal(8,2) not null,
cena decimal(8,2) not null,
primary key (brdok, sifrarobe)
);
create table vrstagoriva
(
sifravrstegoriva integer not null,
nazivvrstegoriva varchar(10) not null,
primary key (sifravrstegoriva)
);
create table vrstavozila
(
sifravrste char(2) not null,
nazivvrste varchar(18) not null,
primary key (sifravrste)
);
create table vozilo
(
sifravozila char(12) not null,
sifravrste char(2) not null references
vrstavozila (sifravrste) on update cascade,
regbroj char(10),
marka char(10),
tip char(20),
brojsasije char(25),
brojmotora char(25),
prvaregistracija timestamp,
snagamotora decimal(10,2),
zapremina integer,
nosivost integer,
mestazasedenje char(4),
karoserija char(25),
boja char(20),
brojosovina char(1),
rokppaparata timestamp,
primary key (sifravozila)
);
create table vozac
(
sifravozaca integer not null,
ime char(25) not null,
kategorije char(5) not null,
datumvazenjadozvole timestamp,
primary key (sifravozaca)
);
create table sipanjegoriva
(
sifrasg integer not null,
datum timestamp not null,
sifravozila char(12) not null references vozilo
(sifravozila) on update cascade,
sifravozaca integer not null references vozac
(sifravozaca) on update cascade,
sifravrstegoriva integer not null references
vrstagoriva (sifravrstegoriva) on update cascade,
sifraposlovnice char(2) not null references
poslovnica (sifraposlovnice) on update cascade,
kmsat decimal(9,1),
kolicina decimal(10, 2) not null,
cena decimal(8,2) not null,
pundocepa char(1),
check (pundocepa = 'n' or pundocepa = 'd'),
primary key (sifrasg)
);
create generator gen_gorivo_id;
set generator gen_gorivo_id to 1;
create table popravka
(
datum timestamp not null,
sifravozila char(12) not null references vozilo
(sifravozila) on update cascade,
sifravozaca integer not null references vozac
(sifravozaca) on update cascade,
sifraposlovnice char(2) not null references
poslovnica (sifraposlovnice) on update cascade,
iznos decimal(12,2) not null,
opis varchar(200),
primary key (datum,sifravozila)
);
create table registracija
(
datum timestamp not null,
sifravozila char(12) not null references vozilo
(sifravozila) on update cascade,
cenatehnickog decimal(12,2),
cenaosiguranja decimal(12,2),
ostalitroskovi decimal(12,2),
sifraposlovnice char(2) not null references
poslovnica (sifraposlovnice) on update cascade,
primary key (datum,sifravozila)
);
create table dummy
(
foobar integer not null primary key,
check (foobar = 1)
);
insert into dummy values (1);
/* then, I create a few views to make a summary report */
create view apromet(datum, so, vrsta, iznos)
as
select rt.datumtroska, sifraposlovnice, cast
(vrt.nazivvrt as varchar
(30)), cast (rt.iznos as numeric(18, 2))
from rezijskitrosak rt
left join vrstart vrt on rt.sifravrt = vrt.sifravrt
union all
select al.datum, sifraposlovnice, cast ('kancmat'
as varchar(30)),
cast(sum(alst.kolicina * alst.cena) as numeric(18, 2))
from alokacijast alst
left join alokacija al on alst.brdok=al.brdok
left join roba r on alst.sifrarobe = r.sifrarobe
where r.vrstarobe = 'km'
group by al.datum, sifraposlovnice
union all
select al.datum, sifraposlovnice, cast ('hemikalije'
as varchar(30)),
cast(sum(alst.kolicina * alst.cena) as numeric(18, 2))
from alokacijast alst
left join alokacija al on alst.brdok=al.brdok
left join roba r on alst.sifrarobe =
pix_size(*args):
"""pix_size(self) -> int"""
return _cv.CvMatrix_pix_size(*args)
def data(*args):
"""
data(self) -> uchar
data(self) -> uchar
"""
return _cv.CvMatrix_data(*args)
def step(*args):
"""step(self) -> int"""
return _cv.CvMatrix_step(*args)
def set_data(*args):
"""
set_data(self, void data, int step=0x7fffffff)
set_data(self, void data)
"""
return _cv.CvMatrix_set_data(*args)
def row(*args):
"""
row(self, int i) -> uchar
row(self, int i) -> uchar
"""
return _cv.CvMatrix_row(*args)
def asCvMat(*args):
"""asCvMat(self) -> CvMat"""
return _cv.CvMatrix_asCvMat(*args)
CvMatrix_swigregister = _cv.CvMatrix_swigregister
CvMatrix_swigregister(CvMatrix)
class CvModule(_object):
"""Proxy of C++ CvModule class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvModule, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvModule, name)
__repr__ = _swig_repr
def __init__(self, *args):
"""__init__(self, CvModuleInfo _info) -> CvModule"""
this = _cv.new_CvModule(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _cv.delete_CvModule
__del__ = lambda self : None;
__swig_setmethods__["info"] = _cv.CvModule_info_set
__swig_getmethods__["info"] = _cv.CvModule_info_get
if _newclass:info = _swig_property(_cv.CvModule_info_get, _cv.CvModule_info_set)
__swig_setmethods__["first"] = _cv.CvModule_first_set
__swig_getmethods__["first"] = _cv.CvModule_first_get
if _newclass:first = _swig_property(_cv.CvModule_first_get, _cv.CvModule_first_set)
__swig_setmethods__["last"] = _cv.CvModule_last_set
__swig_getmethods__["last"] = _cv.CvModule_last_get
if _newclass:last = _swig_property(_cv.CvModule_last_get, _cv.CvModule_last_set)
CvModule_swigregister = _cv.CvModule_swigregister
CvModule_swigregister(CvModule)
class CvType(_object):
"""Proxy of C++ CvType class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvType, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvType, name)
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self, char type_name, CvIsInstanceFunc is_instance, CvReleaseFunc release=0,
CvReadFunc read=0, CvWriteFunc write=0,
CvCloneFunc clone=0) -> CvType
__init__(self, char type_name, CvIsInstanceFunc is_instance, CvReleaseFunc release=0,
CvReadFunc read=0, CvWriteFunc write=0) -> CvType
__init__(self, char type_name, CvIsInstanceFunc is_instance, CvReleaseFunc release=0,
CvReadFunc read=0) -> CvType
__init__(self, char type_name, CvIsInstanceFunc is_instance, CvReleaseFunc release=0) -> CvType
__init__(self, char type_name, CvIsInstanceFunc is_instance) -> CvType
"""
this = _cv.new_CvType(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _cv.delete_CvType
__del__ = lambda self : None;
__swig_setmethods__["info"] = _cv.CvType_info_set
__swig_getmethods__["info"] = _cv.CvType_info_get
if _newclass:info = _swig_property(_cv.CvType_info_get, _cv.CvType_info_set)
__swig_setmethods__["first"] = _cv.CvType_first_set
__swig_getmethods__["first"] = _cv.CvType_first_get
if _newclass:first = _swig_property(_cv.CvType_first_get, _cv.CvType_first_set)
__swig_setmethods__["last"] = _cv.CvType_last_set
__swig_getmethods__["last"] = _cv.CvType_last_get
if _newclass:last = _swig_property(_cv.CvType_last_get, _cv.CvType_last_set)
CvType_swigregister = _cv.CvType_swigregister
CvType_swigregister(CvType)
class CvMoments(_object):
"""Proxy of C++ CvMoments class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvMoments, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvMoments, name)
__repr__ = _swig_repr
__swig_setmethods__["m00"] = _cv.CvMoments_m00_set
__swig_getmethods__["m00"] = _cv.CvMoments_m00_get
if _newclass:m00 = _swig_property(_cv.CvMoments_m00_get, _cv.CvMoments_m00_set)
__swig_setmethods__["m10"] = _cv.CvMoments_m10_set
__swig_getmethods__["m10"] = _cv.CvMoments_m10_get
if _newclass:m10 = _swig_property(_cv.CvMoments_m10_get, _cv.CvMoments_m10_set)
__swig_setmethods__["m01"] = _cv.CvMoments_m01_set
__swig_getmethods__["m01"] = _cv.CvMoments_m01_get
if _newclass:m01 = _swig_property(_cv.CvMoments_m01_get, _cv.CvMoments_m01_set)
__swig_setmethods__["m20"] = _cv.CvMoments_m20_set
__swig_getmethods__["m20"] = _cv.CvMoments_m20_get
if _newclass:m20 = _swig_property(_cv.CvMoments_m20_get, _cv.CvMoments_m20_set)
__swig_setmethods__["m11"] = _cv.CvMoments_m11_set
__swig_getmethods__["m11"] = _cv.CvMoments_m11_get
if _newclass:m11 = _swig_property(_cv.CvMoments_m11_get, _cv.CvMoments_m11_set)
__swig_setmethods__["m02"] = _cv.CvMoments_m02_set
__swig_getmethods__["m02"] = _cv.CvMoments_m02_get
if _newclass:m02 = _swig_property(_cv.CvMoments_m02_get, _cv.CvMoments_m02_set)
__swig_setmethods__["m30"] = _cv.CvMoments_m30_set
__swig_getmethods__["m30"] = _cv.CvMoments_m30_get
if _newclass:m30 = _swig_property(_cv.CvMoments_m30_get, _cv.CvMoments_m30_set)
__swig_setmethods__["m21"] = _cv.CvMoments_m21_set
__swig_getmethods__["m21"] = _cv.CvMoments_m21_get
if _newclass:m21 = _swig_property(_cv.CvMoments_m21_get, _cv.CvMoments_m21_set)
__swig_setmethods__["m12"] = _cv.CvMoments_m12_set
__swig_getmethods__["m12"] = _cv.CvMoments_m12_get
if _newclass:m12 = _swig_property(_cv.CvMoments_m12_get, _cv.CvMoments_m12_set)
__swig_setmethods__["m03"] = _cv.CvMoments_m03_set
__swig_getmethods__["m03"] = _cv.CvMoments_m03_get
if _newclass:m03 = _swig_property(_cv.CvMoments_m03_get, _cv.CvMoments_m03_set)
__swig_setmethods__["mu20"] = _cv.CvMoments_mu20_set
__swig_getmethods__["mu20"] = _cv.CvMoments_mu20_get
if _newclass:mu20 = _swig_property(_cv.CvMoments_mu20_get, _cv.CvMoments_mu20_set)
__swig_setmethods__["mu11"] = _cv.CvMoments_mu11_set
__swig_getmethods__["mu11"] = _cv.CvMoments_mu11_get
if _newclass:mu11 = _swig_property(_cv.CvMoments_mu11_get, _cv.CvMoments_mu11_set)
__swig_setmethods__["mu02"] = _cv.CvMoments_mu02_set
__swig_getmethods__["mu02"] = _cv.CvMoments_mu02_get
if _newclass:mu02 = _swig_property(_cv.CvMoments_mu02_get, _cv.CvMoments_mu02_set)
__swig_setmethods__["mu30"] = _cv.CvMoments_mu30_set
__swig_getmethods__["mu30"] = _cv.CvMoments_mu30_get
if _newclass:mu30 = _swig_property(_cv.CvMoments_mu30_get, _cv.CvMoments_mu30_set)
__swig_setmethods__["mu21"] = _cv.CvMoments_mu21_set
__swig_getmethods__["mu21"] = _cv.CvMoments_mu21_get
if _newclass:mu21 = _swig_property(_cv.CvMoments_mu21_get, _cv.CvMoments_mu21_set)
__swig_setmethods__["mu12"] = _cv.CvMoments_mu12_set
__swig_getmethods__["mu12"] = _cv.CvMoments_mu12_get
if _newclass:mu12 = _swig_property(_cv.CvMoments_mu12_get, _cv.CvMoments_mu12_set)
__swig_setmethods__["mu03"] = _cv.CvMoments_mu03_set
__swig_getmethods__["mu03"] = _cv.CvMoments_mu03_get
if _newclass:mu03 = _swig_property(_cv.CvMoments_mu03_get, _cv.CvMoments_mu03_set)
__swig_setmethods__["inv_sqrt_m00"] = _cv.CvMoments_inv_sqrt_m00_set
__swig_getmethods__["inv_sqrt_m00"] = _cv.CvMoments_inv_sqrt_m00_get
if _newclass:inv_sqrt_m00 = _swig_property(_cv.CvMoments_inv_sqrt_m00_get, _cv.CvMoments_inv_sqrt_m00_set)
def __init__(self, *args):
"""__init__(self) -> CvMoments"""
this = _cv.new_CvMoments(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _cv.delete_CvMoments
__del__ = lambda self : None;
CvMoments_swigregister = _cv.CvMoments_swigregister
CvMoments_swigregister(CvMoments)
class CvHuMoments(_object):
"""Proxy of C++ CvHuMoments class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvHuMoments, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvHuMoments, name)
__repr__ = _swig_repr
__swig_setmethods__["hu1"] = _cv.CvHuMoments_hu1_set
__swig_getmethods__["hu1"] = _cv.CvHuMoments_hu1_get
if _newclass:hu1 = _swig_property(_cv.CvHuMoments_hu1_get, _cv.CvHuMoments_hu1_set)
__swig_setmethods__["hu2"] = _cv.CvHuMoments_hu2_set
__swig_getmethods__["hu2"] = _cv.CvHuMoments_hu2_get
if _newclass:hu2 = _swig_property(_cv.CvHuMoments_hu2_get, _cv.CvHuMoments_hu2_set)
__swig_setmethods__["hu3"] = _cv.CvHuMoments_hu3_set
__swig_getmethods__["hu3"] = _cv.CvHuMoments_hu3_get
if _newclass:hu3 = _swig_property(_cv.CvHuMoments_hu3_get, _cv.CvHuMoments_hu3_set)
__swig_setmethods__["hu4"] = _cv.CvHuMoments_hu4_set
__swig_getmethods__["hu4"] = _cv.CvHuMoments_hu4_get
if _newclass:hu4 = _swig_property(_cv.CvHuMoments_hu4_get, _cv.CvHuMoments_hu4_set)
__swig_setmethods__["hu5"] = _cv.CvHuMoments_hu5_set
__swig_getmethods__["hu5"] = _cv.CvHuMoments_hu5_get
if _newclass:hu5 = _swig_property(_cv.CvHuMoments_hu5_get, _cv.CvHuMoments_hu5_set)
__swig_setmethods__["hu6"] = _cv.CvHuMoments_hu6_set
__swig_getmethods__["hu6"] = _cv.CvHuMoments_hu6_get
if _newclass:hu6 = _swig_property(_cv.CvHuMoments_hu6_get, _cv.CvHuMoments_hu6_set)
__swig_setmethods__["hu7"] = _cv.CvHuMoments_hu7_set
__swig_getmethods__["hu7"] = _cv.CvHuMoments_hu7_get
if _newclass:hu7 = _swig_property(_cv.CvHuMoments_hu7_get, _cv.CvHuMoments_hu7_set)
def __init__(self, *args):
"""__init__(self) -> CvHuMoments"""
this = _cv.new_CvHuMoments(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _cv.delete_CvHuMoments
__del__ = lambda self : None;
CvHuMoments_swigregister = _cv.CvHuMoments_swigregister
CvHuMoments_swigregister(CvHuMoments)
class CvConnectedComp(_object):
"""Proxy of C++ CvConnectedComp class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvConnectedComp, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvConnectedComp, name)
__repr__ = _swig_repr
__swig_setmethods__["area"] = _cv.CvConnectedComp_area_set
__swig_getmethods__["area"] = _cv.CvConnectedComp_area_get
if _newclass:area = _swig_property(_cv.CvConnectedComp_area_get, _cv.CvConnectedComp_area_set)
__swig_setmethods__["value"] = _cv.CvConnectedComp_value_set
__swig_getmethods__["value"] = _cv.CvConnectedComp_value_get
if _newclass:value = _swig_property(_cv.CvConnectedComp_value_get, _cv.CvConnectedComp_value_set)
__swig_setmethods__["rect"] = _cv.CvConnectedComp_rect_set
__swig_getmethods__["rect"] = _cv.CvConnectedComp_rect_get
if _newclass:rect = _swig_property(_cv.CvConnectedComp_rect_get, _cv.CvConnectedComp_rect_set)
__swig_setmethods__["contour"] = _cv.CvConnectedComp_contour_set
__swig_getmethods__["contour"] = _cv.CvConnectedComp_contour_get
if _newclass:contour = _swig_property(_cv.CvConnectedComp_contour_get, _cv.CvConnectedComp_contour_set)
def __init__(self, *args):
"""__init__(self) -> CvConnectedComp"""
this = _cv.new_CvConnectedComp(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _cv.delete_CvConnectedComp
__del__ = lambda self : None;
CvConnectedComp_swigregister = _cv.CvConnectedComp_swigregister
CvConnectedComp_swigregister(CvConnectedComp)
class CvChainPtReader(_object):
"""Proxy of C++ CvChainPtReader class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvChainPtReader, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvChainPtReader, name)
__repr__ = _swig_repr
__swig_setmethods__["header_size"] = _cv.CvChainPtReader_header_size_set
__swig_getmethods__["header_size"] = _cv.CvChainPtReader_header_size_get
if _newclass:header_size = _swig_property(_cv.CvChainPtReader_header_size_get, _cv.CvChainPtReader_header_size_set)
__swig_setmethods__["seq"] = _cv.CvChainPtReader_seq_set
__swig_getmethods__["seq"] = _cv.CvChainPtReader_seq_get
if _newclass:seq = _swig_property(_cv.CvChainPtReader_seq_get, _cv.CvChainPtReader_seq_set)
__swig_setmethods__["block"] = _cv.CvChainPtReader_block_set
__swig_getmethods__["block"] = _cv.CvChainPtReader_block_get
if _newclass:block = _swig_property(_cv.CvChainPtReader_block_get, _cv.CvChainPtReader_block_set)
__swig_setmethods__["ptr"] = _cv.CvChainPtReader_ptr_set
__swig_getmethods__["ptr"] = _cv.CvChainPtReader_ptr_get
if _newclass:ptr = _swig_property(_cv.CvChainPtReader_ptr_get, _cv.CvChainPtReader_ptr_set)
__swig_setmethods__["block_min"] = _cv.CvChainPtReader_block_min_set
__swig_getmethods__["block_min"] = _cv.CvChainPtReader_block_min_get
if _newclass:block_min = _swig_property(_cv.CvChainPtReader_block_min_get, _cv.CvChainPtReader_block_min_set)
__swig_setmethods__["block_max"] = _cv.CvChainPtReader_block_max_set
__swig_getmethods__["block_max"] = _cv.CvChainPtReader_block_max_get
if _newclass:block_max = _swig_property(_cv.CvChainPtReader_block_max_get, _cv.CvChainPtReader_block_max_set)
__swig_setmethods__["delta_index"] = _cv.CvChainPtReader_delta_index_set
__swig_getmethods__["delta_index"] = _cv.CvChainPtReader_delta_index_get
if _newclass:delta_index = _swig_property(_cv.CvChainPtReader_delta_index_get, _cv.CvChainPtReader_delta_index_set)
__swig_setmethods__["prev_elem"] = _cv.CvChainPtReader_prev_elem_set
__swig_getmethods__["prev_elem"] = _cv.CvChainPtReader_prev_elem_get
if _newclass:prev_elem = _swig_property(_cv.CvChainPtReader_prev_elem_get, _cv.CvChainPtReader_prev_elem_set)
__swig_setmethods__["code"] = _cv.CvChainPtReader_code_set
__swig_getmethods__["code"] = _cv.CvChainPtReader_code_get
if _newclass:code = _swig_property(_cv.CvChainPtReader_code_get, _cv.CvChainPtReader_code_set)
__swig_setmethods__["pt"] = _cv.CvChainPtReader_pt_set
__swig_getmethods__["pt"] = _cv.CvChainPtReader_pt_get
if _newclass:pt = _swig_property(_cv.CvChainPtReader_pt_get, _cv.CvChainPtReader_pt_set)
__swig_setmethods__["deltas"] = _cv.CvChainPtReader_deltas_set
__swig_getmethods__["deltas"] = _cv.CvChainPtReader_deltas_get
if _newclass:deltas = _swig_property(_cv.CvChainPtReader_deltas_get, _cv.CvChainPtReader_deltas_set)
def __init__(self, *args):
"""__init__(self) -> CvChainPtReader"""
this = _cv.new_CvChainPtReader(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _cv.delete_CvChainPtReader
__del__ = lambda self : None;
CvChainPtReader_swigregister = _cv.CvChainPtReader_swigregister
CvChainPtReader_swigregister(CvChainPtReader)
class CvContourTree(_object):
"""Proxy of C++ CvContourTree class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvContourTree, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvContourTree, name)
__repr__ = _swig_repr
__swig_setmethods__["flags"] = _cv.CvContourTree_flags_set
__swig_getmethods__["flags"] = _cv.CvContourTree_flags_get
if _newclass:flags = _swig_property(_cv.CvContourTree_flags_get, _cv.CvContourTree_flags_set)
__swig_setmethods__["header_size"] = _cv.CvContourTree_header_size_set
__swig_getmethods__["header_size"] = _cv.CvContourTree_header_size_get
if _newclass:header_size = _swig_property(_cv.CvContourTree_header_size_get, _cv.CvContourTree_header_size_set)
__swig_setmethods__["h_prev"] = _cv.CvContourTree_h_prev_set
__swig_getmethods__["h_prev"] = _cv.CvContourTree_h_prev_get
if _newclass:h_prev = _swig_property(_cv.CvContourTree_h_prev_get, _cv.CvContourTree_h_prev_set)
__swig_setmethods__["h_next"] = _cv.CvContourTree_h_next_set
__swig_getmethods__["h_next"] = _cv.CvContourTree_h_next_get
if _newclass:h_next = _swig_property(_cv.CvContourTree_h_next_get, _cv.CvContourTree_h_next_set)
__swig_setmethods__["v_prev"] = _cv.CvContourTree_v_prev_set
__swig_getmethods__["v_prev"] = _cv.CvContourTree_v_prev_get
if _newclass:v_prev = _swig_property(_cv.CvContourTree_v_prev_get, _cv.CvContourTree_v_prev_set)
__swig_setmethods__["v_next"] = _cv.CvContourTree_v_next_set
__swig_getmethods__["v_next"] = _cv.CvContourTree_v_next_get
if _newclass:v_next = _swig_property(_cv.CvContourTree_v_next_get, _cv.CvContourTree_v_next_set)
__swig_setmethods__["total"] = _cv.CvContourTree_total_set
__swig_getmethods__["total"] = _cv.CvContourTree_total_get
if _newclass:total = _swig_property(_cv.CvContourTree_total_get, _cv.CvContourTree_total_set)
__swig_setmethods__["elem_size"] = _cv.CvContourTree_elem_size_set
__swig_getmethods__["elem_size"] = _cv.CvContourTree_elem_size_get
if _newclass:elem_size = _swig_property(_cv.CvContourTree_elem_size_get, _cv.CvContourTree_elem_size_set)
__swig_setmethods__["block_max"] = _cv.CvContourTree_block_max_set
__swig_getmethods__["block_max"] = _cv.CvContourTree_block_max_get
if _newclass:block_max = _swig_property(_cv.CvContourTree_block_max_get, _cv.CvContourTree_block_max_set)
__swig_setmethods__["ptr"] = _cv.CvContourTree_ptr_set
__swig_getmethods__["ptr"] = _cv.CvContourTree_ptr_get
if _newclass:ptr = _swig_property(_cv.CvContourTree_ptr_get, _cv.CvContourTree_ptr_set)
__swig_setmethods__["delta_elems"] = _cv.CvContourTree_delta_elems_set
__swig_getmethods__["delta_elems"] = _cv.CvContourTree_delta_elems_get
if _newclass:delta_elems = _swig_property(_cv.CvContourTree_delta_elems_get, _cv.CvContourTree_delta_elems_set)
__swig_setmethods__["storage"] = _cv.CvContourTree_storage_set
__swig_getmethods__["storage"] = _cv.CvContourTree_storage_get
if _newclass:storage = _swig_property(_cv.CvContourTree_storage_get, _cv.CvContourTree_storage_set)
__swig_setmethods__["free_blocks"] = _cv.CvContourTree_free_blocks_set
__swig_getmethods__["free_blocks"] = _cv.CvContourTree_free_blocks_get
if _newclass:free_blocks = _swig_property(_cv.CvContourTree_free_blocks_get, _cv.CvContourTree_free_blocks_set)
__swig_setmethods__["first"] = _cv.CvContourTree_first_set
__swig_getmethods__["first"] = _cv.CvContourTree_first_get
if _newclass:first = _swig_property(_cv.CvContourTree_first_get, _cv.CvContourTree_first_set)
__swig_setmethods__["p1"] = _cv.CvContourTree_p1_set
__swig_getmethods__["p1"] = _cv.CvContourTree_p1_get
if _newclass:p1 = _swig_property(_cv.CvContourTree_p1_get, _cv.CvContourTree_p1_set)
__swig_setmethods__["p2"] = _cv.CvContourTree_p2_set
__swig_getmethods__["p2"] = _cv.CvContourTree_p2_get
if _newclass:p2 = _swig_property(_cv.CvContourTree_p2_get, _cv.CvContourTree_p2_set)
def __init__(self, *args):
"""__init__(self) -> CvContourTree"""
this = _cv.new_CvContourTree(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _cv.delete_CvContourTree
__del__ = lambda self : None;
CvContourTree_swigregister = _cv.CvContourTree_swigregister
CvContourTree_swigregister(CvContourTree)
class CvConvexityDefect(_object):
"""Proxy of C++ CvConvexityDefect class"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CvConvexityDefect, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CvConvexityDefect, name)
__repr__ = _swig_repr
__swig_setmethods__["start"] = _cv.CvConvexityDefect_start_set
__swig_getmethods__["start"] = _cv.CvConvexityDefect_start_get
if _newclass:start = _swig_property(_cv.CvConvexityDefect_start_get, _cv.CvConvexityDefect_start_set)
__swig_setmethods__["end"] = _cv.CvConvexityDefect_end_set
__swig_getmethods__["end"] = _cv.CvConvexityDefect_end_get
if _newclass:end = _swig_property(_cv.CvConvexityDefect_end_get,
# feature_generation/seq2seq/seq2seq.py
"""
This file implements an RNN encoder-decoder model (also known as a sequence-to-sequence model).
We made the choice not to implement an attention mechanism (which would allow the decoder to have a 'peek' at the input).
The reason is that we are not trying to maximize the output of the decoder but rather the feature selection process.
(http://suriyadeepan.github.io/2016-06-28-easy-seq2seq/)
We will use batch-major rather than time-major even though time-major is slightly more efficient
since it makes the feature extraction process a lot easier.
We will not be using bucketing because traces of the same webpage will have the same length.
Therefore every batch, we will most likely be training the seq2seq model on one webpage
! Does encoder share weights with decoder or not (Less computation vs natural (https://arxiv.org/pdf/1409.3215.pdf))
! Reverse traces (https://arxiv.org/pdf/1409.3215.pdf)
Hyperparameters to tune:
------------------------
- Learning rate
- Which cell to use (GRU vs LSTM) or a deep RNN architecture using `MultiRNNCell`
- Reversing traces
- Bidirectional encoder
- Other objective functions (such as MSE,...)
- Amount of encoder and decoder hidden states
"""
import numpy as np
import tensorflow as tf
from sys import stdout, path
from os import path as ospath
from tensorflow.contrib.rnn import LSTMStateTuple
path.append(ospath.dirname(ospath.dirname(ospath.abspath(__file__))))
import helpers
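# Example instantiation and one training step (added sketch; cell sizes and
# other hyperparameters are placeholders, not values from the original code):
#   model = Seq2SeqModel(
#       encoder_cell=tf.nn.rnn_cell.LSTMCell(128),
#       decoder_cell=tf.nn.rnn_cell.LSTMCell(128),
#       seq_width=1, batch_size=100, bidirectional=False, reverse=True)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       train_dict = model.next_batch(batches, in_memory=True)
#       _, loss = sess.run([model.train_op, model.loss], feed_dict=train_dict)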
class Seq2SeqModel():
"""
Implements a sequence to sequence model for real values
Attributes:
- encoder_cell is the cell that will be used for encoding
(Should be part of `tf.nn.rnn_cell`)
- decoder cell is the cell used for decoding
(Should be part of `tf.nn.rnn_cell`)
- seq_width shows how many features each input in the sequence has
(For website fingerprinting this is only 2 (packet_size, incoming))
- batch_size
- bidirectional is a boolean value that determines whether the encoder is bidirectional or not
- reverse is also a boolean value that when if true, reversed the traces for training
"""
def __init__(self, encoder_cell, decoder_cell, seq_width, batch_size=100, bidirectional=False, reverse=False, saved_graph=None, sess=None, learning_rate=0.0006):
"""
@param saved_graph is a string, representing the path to the saved graph
"""
# Constants
self.PAD = 0
self.EOS = -1
self.reverse = reverse
self.seq_width = seq_width
self.batch_size = batch_size
self.learning_rate = learning_rate
self.bidirectional = bidirectional
self.encoder_cell = encoder_cell
self.decoder_cell = decoder_cell
self._make_graph()
if saved_graph is not None and sess is not None:
self.import_from_file(sess, saved_graph)
def _make_graph(self):
"""
Construct the graph
"""
self._init_placeholders()
self._init_encoder()
self._init_decoder()
self._init_train()
def _init_placeholders(self):
"""
The main placeholders used for the input data, and output
"""
# The usual format is: `[self.batch_size, max_sequence_length, self.seq_width]`
# But we define `max_sequence_length` as None to make it dynamic so we only need to pad
# each batch to the maximum sequence length
self.encoder_inputs = tf.placeholder(tf.float32,
[self.batch_size, None, self.seq_width])
self.encoder_inputs_length = tf.placeholder(tf.int32, [self.batch_size])
self.decoder_targets = tf.placeholder(tf.float32,
[self.batch_size, None, self.seq_width])
def _init_encoder(self):
"""
Creates the encoder attributes
Attributes:
- encoder_outputs is shaped [max_sequence_length, batch_size, seq_width]
(since time-major == True)
- encoder_final_state is shaped [batch_size, encoder_cell.state_size]
"""
if not self.bidirectional:
with tf.variable_scope('Encoder') as scope:
self.encoder_outputs, self.encoder_final_state = tf.nn.dynamic_rnn(
cell=self.encoder_cell,
dtype=tf.float32,
sequence_length=self.encoder_inputs_length,
inputs=self.encoder_inputs,
time_major=False)
else:
((encoder_fw_outputs,
encoder_bw_outputs),
(encoder_fw_final_state,
encoder_bw_final_state)) = (
tf.nn.bidirectional_dynamic_rnn(cell_fw=self.encoder_cell,
cell_bw=self.encoder_cell,
inputs=self.encoder_inputs,
sequence_length=self.encoder_inputs_length,
dtype=tf.float32, time_major=False)
)
self.encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)
if isinstance(encoder_fw_final_state, LSTMStateTuple):
encoder_final_state_c = tf.concat(
(encoder_fw_final_state.c, encoder_bw_final_state.c), 1)
encoder_final_state_h = tf.concat(
(encoder_fw_final_state.h, encoder_bw_final_state.h), 1)
self.encoder_final_state = LSTMStateTuple(
c=encoder_final_state_c,
h=encoder_final_state_h
)
else:
self.encoder_final_state = tf.concat(
(encoder_fw_final_state, encoder_bw_final_state), 1)
def _init_decoder(self):
"""
Creates decoder attributes.
We cannot simply use a dynamic_rnn since we are feeding the outputs of the
decoder back into the inputs.
Therefore we use a raw_rnn and emulate a dynamic_rnn with this behavior.
(https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/rnn.py)
"""
# EOS token added
self.decoder_inputs_length = self.encoder_inputs_length + 1
def loop_fn_initial(time, cell_output, cell_state, loop_state):
elements_finished = (time >= self.decoder_inputs_length)
# EOS token (0 + self.EOS)
initial_input = tf.zeros([self.batch_size, self.decoder_cell.output_size], dtype=tf.float32) + self.EOS
initial_cell_state = self.encoder_final_state
initial_loop_state = None # we don't need to pass any additional information
return (elements_finished,
initial_input,
initial_cell_state,
None, # cell output is dummy here
initial_loop_state)
def loop_fn(time, cell_output, cell_state, loop_state):
if cell_output is None: # time == 0
return loop_fn_initial(time, cell_output, cell_state, loop_state)
cell_output.set_shape([self.batch_size, self.decoder_cell.output_size])
emit_output = cell_output
next_cell_state = cell_state
elements_finished = (time >= self.decoder_inputs_length)
finished = tf.reduce_all(elements_finished)
next_input = tf.cond(
finished,
lambda: tf.zeros([self.batch_size, self.decoder_cell.output_size], dtype=tf.float32), # self.PAD
                lambda: cell_output  # Feed the previous time step's output back in as the next input
)
next_loop_state = None
return (
elements_finished,
next_input,
next_cell_state,
emit_output,
next_loop_state
)
decoder_outputs_ta, decoder_final_state, _ = tf.nn.raw_rnn(self.decoder_cell, loop_fn)
self.decoder_outputs = decoder_outputs_ta.stack()
self.decoder_outputs = tf.transpose(self.decoder_outputs, [1, 0, 2])
with tf.variable_scope('DecoderOutputProjection') as scope:
self.decoder_outputs = self.projection(self.decoder_outputs, self.seq_width, scope)
def _init_train(self):
self.loss = tf.reduce_sum(tf.square(self.decoder_targets - self.decoder_outputs))
# Which optimizer to use? `GradientDescentOptimizer`, `AdamOptimizer` or `RMSProp`?
self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
def projection(self, inputs, projection_size, scope):
"""
        Projects the input with a known number of features to `projection_size` features
@param inputs is shaped like [time, batch, input_size] or [batch, input_size]
@param projection_size int32
@param scope outer variable scope
"""
input_size = inputs.get_shape()[-1].value
with tf.variable_scope(scope) as scope:
W = tf.get_variable(name='W', shape=[input_size, projection_size],
dtype=tf.float32)
b = tf.get_variable(name='b', shape=[projection_size],
dtype=tf.float32,
initializer=tf.constant_initializer(0, dtype=tf.float32))
input_shape = tf.unstack(tf.shape(inputs))
if len(input_shape) == 3:
time, batch, _ = input_shape # dynamic parts of shape
inputs = tf.reshape(inputs, [-1, input_size])
elif len(input_shape) == 2:
batch, _depth = input_shape
else:
raise ValueError("Weird input shape: {}".format(inputs))
linear = tf.add(tf.matmul(inputs, W), b)
if len(input_shape) == 3:
linear = tf.reshape(linear, [time, batch, projection_size])
return linear
def next_batch(self, batches, in_memory, max_time_diff=float("inf")):
"""
Returns the next batch.
@param batches an iterator with all of the batches (
if in_memory == True:
in batch-major form without padding
else:
A list of paths to the files
)
@param in_memory is a boolean value
@param max_time_diff **(should only be defined if `in_memory == False`)**
            specifies the maximum time difference allowed between the first packet in the trace and the last one
@return if in_memory is False, returns a tuple of (dict, [paths], max_length) where paths is a list of paths for each batch
else it returns a dict for training
"""
batch = next(batches)
data_batch = batch
if not in_memory:
data_batch = [helpers.read_cell_file(path, max_time_diff=max_time_diff) for path in batch]
for i, cell in enumerate(data_batch):
data_batch[i] = [packet[0] * packet[1] for packet in cell]
data_batch, encoder_input_lengths_ = helpers.pad_traces(data_batch, reverse=self.reverse, seq_width=self.seq_width)
encoder_inputs_ = data_batch
decoder_targets_ = helpers.add_EOS(data_batch, encoder_input_lengths_)
train_dict = {
self.encoder_inputs: encoder_inputs_,
self.encoder_inputs_length: encoder_input_lengths_,
self.decoder_targets: decoder_targets_,
}
if not in_memory:
return (train_dict, batch, max(encoder_input_lengths_))
return train_dict
def save(self, sess, file_name):
"""
Save the model in a file
@param sess is the session
@param file_name is the file name without the extension
"""
saver = tf.train.Saver()
saver.save(sess, file_name)
# saver.export_meta_graph(filename=file_name + '.meta')
def import_from_file(self, sess, file_name):
"""
Imports the graph from a file
@param sess is the session
@param file_name is a string that represents the file name
without the extension
"""
# Get the graph
saver = tf.train.Saver()
# Restore the variables
saver.restore(sess, file_name)
def train_on_copy_task(sess, model, data,
batch_size=100,
max_batches=None,
batches_in_epoch=1000,
max_time_diff=float("inf"),
verbose=False):
"""
Train the `Seq2SeqModel` on a copy task
@param sess is a tensorflow session
@param model is the seq2seq model
    @param data is a list of paths to the trace files (next_batch is called with `in_memory == False`)
"""
batches = helpers.get_batches(data, batch_size=batch_size)
loss_track = []
batches_in_data = len(data) // batch_size
if max_batches is None or batches_in_data < max_batches:
max_batches = batches_in_data - 1
try:
for batch in range(max_batches):
print("Batch {}/{}".format(batch, max_batches))
fd, _, length = model.next_batch(batches, False, max_time_diff)
_, l = sess.run([model.train_op, model.loss], fd)
loss_track.append(l / length)
if batch == 0 or batch % batches_in_epoch == 0:
model.save(sess, 'seq2seq_model')
helpers.save_object(loss_track, 'loss_track.pkl')
if verbose:
stdout.write(' minibatch loss: {}\n'.format(sess.run(model.loss, fd)))
predict_ = sess.run(model.decoder_outputs, fd)
for i, (inp, pred) in enumerate(zip(fd[model.encoder_inputs].swapaxes(0, 1), predict_.swapaxes(0, 1))):
stdout.write(' sample {}:\n'.format(i + 1))
stdout.write(' input > {}\n'.format(inp))
stdout.write(' predicted > {}\n'.format(pred))
if i >= 0:
break
stdout.write('\n')
except KeyboardInterrupt:
stdout.write('training interrupted')
model.save(sess, 'seq2seq_model')
exit(0)
model.save(sess, 'seq2seq_model')
helpers.save_object(loss_track, 'loss_track.pkl')
return loss_track
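# A minimal usage sketch (added for illustration; not part of the original module).
# The Seq2SeqModel keyword arguments below are assumptions inferred from the
# attributes the class uses (batch_size, seq_width, encoder_cell, ...); the real
# constructor is defined earlier in this file and may take different arguments.
def _example_copy_task_run(trace_files):
    encoder_cell = tf.nn.rnn_cell.LSTMCell(64)
    decoder_cell = tf.nn.rnn_cell.LSTMCell(64)
    model = Seq2SeqModel(batch_size=100,
                         seq_width=1,
                         encoder_cell=encoder_cell,
                         decoder_cell=decoder_cell,
                         learning_rate=0.001,
                         bidirectional=False)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # trace_files is a list of paths, read batch by batch (in_memory == False)
        return train_on_copy_task(sess, model, trace_files,
                                   batch_size=100, verbose=True)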
def get_vector_representations(sess, model, data, save_dir,
batch_size=100,
max_batches=None,
batches_in_epoch=1000,
max_time_diff=float("inf"),
extension=".cell"):
"""
Given a trained model, gets a vector representation for the traces in batch
@param sess is a tensorflow session
@param model is the seq2seq model
    @param data is the data (in batch-major form and not padded or
# ===============================================================================
# Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import hashlib
# ============= standard library imports ========================
from datetime import datetime, timedelta
import six
from six.moves import StringIO
from sqlalchemy import Date, distinct
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql.expression import and_, func, not_, cast as sql_cast
from sqlalchemy.sql.functions import count
# ============= enthought library imports =======================
from traits.api import provides
from pychron.core.utils import alpha_to_int
from pychron.database.core.database_adapter import DatabaseAdapter
# ============= local library imports ==========================
from pychron.database.core.functions import delete_one
from pychron.database.core.query import compile_query, in_func
from pychron.database.i_browser import IBrowser
# flux_
from pychron.database.orms.isotope.flux import flux_FluxTable, flux_HistoryTable, flux_MonitorTable
# gen_
from pychron.database.orms.isotope.gen import gen_LoadHolderTable, gen_DetectorTable, \
gen_ExtractionDeviceTable, gen_ProjectTable, \
gen_MolecularWeightTable, gen_MaterialTable, gen_MassSpectrometerTable, \
gen_SampleTable, gen_LabTable, gen_AnalysisTypeTable, gen_UserTable, \
gen_ImportTable, gen_SensitivityTable
# irrad_
from pychron.database.orms.isotope.irrad import irrad_HolderTable, irrad_ProductionTable, irrad_IrradiationTable, \
irrad_ChronologyTable, irrad_LevelTable, irrad_PositionTable
# loading_
from pychron.database.orms.isotope.loading import loading_LoadTable, loading_PositionsTable
# meas_
from pychron.database.orms.isotope.meas import meas_AnalysisTable, \
meas_ExperimentTable, meas_ExtractionTable, meas_IsotopeTable, meas_MeasurementTable, \
meas_SpectrometerParametersTable, meas_SpectrometerDeflectionsTable, \
meas_SignalTable, meas_PeakCenterTable, meas_PositionTable, \
meas_ScriptTable, meas_MonitorTable, meas_GainHistoryTable, meas_GainTable
# med_
from pychron.database.orms.isotope.med import med_ImageTable, med_SnapshotTable, med_SampleImageTable
# proc_
from pychron.database.orms.isotope.proc import proc_DetectorIntercalibrationHistoryTable, \
proc_DetectorIntercalibrationTable, proc_SelectedHistoriesTable, \
proc_BlanksTable, proc_BackgroundsTable, proc_BlanksHistoryTable, proc_BackgroundsHistoryTable, \
proc_IsotopeResultsTable, proc_FitHistoryTable, \
proc_FitTable, proc_DetectorParamTable, proc_NotesTable, proc_FigureTable, proc_FigureAnalysisTable, \
proc_FigurePrefTable, proc_TagTable, proc_ArArTable, proc_InterpretedAgeHistoryTable, proc_InterpretedAgeSetTable, \
proc_InterpretedAgeGroupHistoryTable, proc_InterpretedAgeGroupSetTable, proc_FigureLabTable, \
proc_SensitivityHistoryTable, proc_SensitivityTable, \
proc_AnalysisGroupTable, proc_AnalysisGroupSetTable, proc_DataReductionTagTable, proc_DataReductionTagSetTable, \
proc_BlanksSetValueTable, proc_ActionTable, proc_BlanksSetTable
# spec_
from pychron.database.orms.isotope.spec import spec_MassCalHistoryTable, spec_MassCalScanTable, spec_MFTableTable
from pychron.pychron_constants import NULL_STR
def binfunc(ds, hours):
ds = [dx[0] for dx in ds]
p1 = ds[0]
delta_seconds = hours * 3600
td = timedelta(seconds=delta_seconds * 0.25)
for i, di in enumerate(ds):
i = max(0, i - 1)
dd = ds[i]
if (di - dd).total_seconds() > delta_seconds:
yield p1 - td, dd + td
p1 = di
yield p1 - td, di + td
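# Added note: binfunc expects `ds` as a time-ordered sequence of rows whose first
# element is a datetime and yields (start, end) windows, opening a new window
# whenever two consecutive timestamps are more than `hours` apart; every window is
# padded by a quarter of that span on each side. For hours=2 and timestamps at
# 01:00, 02:00 and 10:00 it yields (00:30, 02:30) and (09:30, 10:30).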
# class session(object):
# def __call__(self, f):
# def wrapped_f(obj, *args, **kw):
# with obj.session_ctx() as sess:
# kw['sess']=sess
# return f(obj, *args, **kw)
#
# return wrapped_f
@provides(IBrowser)
class IsotopeAdapter(DatabaseAdapter):
"""
new style adapter
        be careful with the super methods you use; they may be deprecated.
        using decorators is the new model
"""
# selector_klass = IsotopeAnalysisSelector
# @property
# def selector_klass(self):
# from pychron.database.selectors.isotope_selector import IsotopeAnalysisSelector
#
# return IsotopeAnalysisSelector
def get_mass_spectrometer_names(self):
return self._get_names(self.get_mass_spectrometers)
def _get_names(self, func):
names = []
with self.session_ctx():
eds = func()
if eds:
names = [e.name for e in eds]
return names
def get_extraction_device_names(self):
return self._get_names(self.get_extract_devices)
def get_flux_value(self, identifier, attr):
j = 0
with self.session_ctx():
dbln = self.db.get_labnumber(identifier)
if dbln and dbln.selected_flux_history:
f = dbln.selected_flux_history.flux
j = getattr(f, attr)
return j
def get_level_identifiers(self, irrad, level):
lns = []
with self.session_ctx():
level = self.get_irradiation_level(irrad, level)
if level:
lns = [str(pi.labnumber.identifier).strip()
for pi in level.positions if pi.labnumber.identifier]
lns = [li for li in lns if li]
lns = sorted(lns)
return lns
def get_irradiation_names(self, **kw):
names = []
with self.session_ctx():
ns = self.get_irradiations(**kw)
if ns:
names = [i.name for i in ns]
return names
def get_analysis_info(self, li):
with self.session_ctx():
dbln = self.get_labnumber(li)
if not dbln:
return None
else:
project, sample, material, irradiation, level, pos = '', '', '', '', '', 0
sample = dbln.sample
if sample:
if sample.project:
project = sample.project.name
if sample.material:
material = sample.material.name
sample = sample.name
dbpos = dbln.irradiation_position
if dbpos:
level = dbpos.level
irradiation = level.irradiation.name
level = level.name
pos = dbpos.position
return project, sample, material, irradiation, level, pos
def set_analysis_sensitivity(self, analysis, v, e):
hist = proc_SensitivityHistoryTable()
hist.analysis_id = analysis.id
self._add_item(hist)
sens = proc_SensitivityTable(value=float(v),
error=float(e))
hist.sensitivity = sens
self._add_item(sens)
analysis.selected_histories.selected_sensitivity = hist
def save_flux(self, identifier, v, e, inform=True):
with self.session_ctx():
dbln = self.get_labnumber(identifier)
if dbln:
dbpos = dbln.irradiation_position
dbhist = self.add_flux_history(dbpos)
dbflux = self.add_flux(float(v), float(e))
dbflux.history = dbhist
dbln.selected_flux_history = dbhist
msg = u'Flux for {} {} \u00b1{} saved to database'.format(identifier, v, e)
if inform:
self.information_dialog(msg)
else:
self.debug(msg)
def interpreted_age_factory(self, hi):
from pychron.database.interpreted_age import InterpretedAge
dbln = self.get_labnumber(hi.identifier)
sample = None
irrad = None
material = None
lithology = None
if dbln:
if dbln.sample:
lithology = dbln.sample.lithology
sample = dbln.sample.name
dbmat = dbln.sample.material
if dbmat:
material = dbmat.name
pos = dbln.irradiation_position
if pos:
level = pos.level
irrad = level.irradiation
irrad = '{}{} {}'.format(irrad.name, level.name, pos.position)
ia = hi.interpreted_age
if ia:
if ia.age_kind == 'Plateau':
n = len([x for x in ia.sets if x.plateau_step])
else:
n = len(ia.sets)
it = InterpretedAge(create_date=hi.create_date,
id=hi.id,
age=ia.age,
age_err=ia.age_err,
display_age_units=ia.display_age_units or 'Ma',
kca=ia.kca or 0,
kca_err=ia.kca_err or 0,
mswd=ia.mswd,
age_kind=ia.age_kind,
kca_kind=ia.kca_kind,
identifier=hi.identifier,
sample=sample or '',
irradiation=irrad or '',
material=material or '',
lithology=lithology or '',
nanalyses=n,
name='{} - {}'.format(hi.create_date, ia.age_kind))
return it
# ===========================================================================
# adders
# ===========================================================================
def add_data_reduction_tag(self, name, comment, user=None):
if user:
user = self.get_user(self.save_username)
obj = proc_DataReductionTagTable(name=name, comment=comment, user=user)
return self._add_item(obj)
def add_data_reduction_tag_set(self, dbtag, an, sh_id):
obj = proc_DataReductionTagSetTable()
obj.tag = dbtag
obj.analysis = an
        obj.selected_histories_id = sh_id
        return self._add_item(obj)
def add_proc_action(self, msg, **kw):
obj = proc_ActionTable(action=msg, **kw)
return self._add_item(obj)
def add_mftable(self, specname, blob):
spec = self.get_mass_spectrometer(specname)
if spec is None:
self.warning('Invalid spectrometer: {}'.format(specname))
obj = spec_MFTableTable(blob=blob, spectrometer=spec)
self._add_item(obj)
def add_analysis_group(self, name, **kw):
obj = proc_AnalysisGroupTable(name=name, **kw)
self._add_item(obj)
return obj
def add_analysis_group_set(self, group, analysis, **kw):
obj = proc_AnalysisGroupSetTable(analysis_id=analysis.id, **kw)
self._add_item(obj)
if isinstance(group, six.integer_types):
obj.group_id = group
else:
group.analyses.append(obj)
return obj
def add_interpreted_age_group_history(self, name, project=None):
project = self.get_project(project)
if project:
obj = proc_InterpretedAgeGroupHistoryTable(name=name)
obj.project_id = project.id
self._add_item(obj)
return obj
def add_interpreted_age_group_set(self, hist, interpreted_age_id, **kw):
obj = proc_InterpretedAgeGroupSetTable(**kw)
obj.group = hist
obj.interpreted_age_id = interpreted_age_id
self._add_item(obj)
return obj
def add_history(self, dbrecord, kind, **kw):
func = getattr(self, 'add_{}_history'.format(kind))
history = func(dbrecord, user=self.save_username, **kw)
# history = db.add_blanks_history(dbrecord, user=db.save_username)
# set analysis' selected history
sh = self.add_selected_histories(dbrecord)
setattr(sh, 'selected_{}'.format(kind), history)
# db.sess.flush()
# sh.selected_blanks = history
return history
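        # Added note: a hypothetical call such as add_history(dban, 'blanks') resolves
        # to add_blanks_history(dban, user=self.save_username), fetches or creates the
        # analysis' selected-histories record and stores the new history on its
        # selected_blanks attribute before returning it.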
def add_mass_calibration_history(self, spectrometer):
spec = self.get_mass_spectrometer(spectrometer)
if spec:
hist = spec_MassCalHistoryTable()
hist.spectrometer_id = spec.id
self._add_item(hist)
return hist
def add_mass_calibration_scan(self, hist, iso, **kw):
s = spec_MassCalScanTable(**kw)
iso = self.get_molecular_weight(iso)
s.history_id = hist.id
s.molecular_weight_id = iso.id
self._add_item(s)
return s
def add_arar_history(self, analysis, **kw):
hist = self._add_history('ArAr', analysis, **kw)
return hist
def add_arar(self, hist, **kw):
a = proc_ArArTable(**kw)
hist.arar_result = a
self._add_item(a)
return a
def add_load(self, name, **kw):
l = loading_LoadTable(name=name, **kw)
self._add_item(l)
return l
def add_load_position(self, labnumber, **kw):
lp = loading_PositionsTable(**kw)
ln = self.get_labnumber(labnumber)
if ln:
lp.lab_identifier = ln.id
self._add_item(lp)
return lp
def add_tag(self, name, **kw):
tag = proc_TagTable(name=name, **kw)
return self._add_unique(tag, 'tag', name)
def add_import(self, **kw):
dbimport = gen_ImportTable(**kw)
self._add_item(dbimport)
return dbimport
def add_snapshot(self, path, **kw):
dbsnap = med_SnapshotTable(path, **kw)
self._add_item(dbsnap)
return dbsnap
def add_image(self, name, image=None):
if image is not None:
if not isinstance(image, str):
buf = StringIO()
image.save(buf)
image = buf.getvalue()
dbim = med_ImageTable(name=name, image=image)
self._add_item(dbim)
return dbim
def add_sample_image(self, sample_name, image_name, image, note, project=None, material=None, identifier=None):
with self.session_ctx():
sam = self.get_sample(sample_name, project, material, identifier)
obj = med_SampleImageTable(name=image_name, image=image, note=note)
obj.sample_id = sam.id
self._add_item(obj)
def add_monitor(self, analysis, **kw):
dbm = meas_MonitorTable(**kw)
analysis = self.get_analysis(analysis)
if analysis:
dbm.analysis = analysis
# dbm.analysis_id = analysis.id
# analysis.monitors.append(dbm)
self._add_item(dbm)
return dbm
def add_analysis_position(self, extraction, pos, **kw):
try:
pos = int(pos)
dbpos = meas_PositionTable(position=pos, **kw)
if extraction:
dbpos.extraction_id = extraction.id
# extraction.positions.append(dbpos)
self._add_item(dbpos)
return dbpos
except (ValueError, TypeError) as e:
pass
def add_note(self, analysis, note, **kw):
analysis = self.get_analysis(analysis)
obj = proc_NotesTable(note=note, user=self.save_username)
if analysis:
            obj.analysis = analysis
            # analysis.notes.append(obj)
        self._add_item(obj)
        return obj
def add_interpreted_age_history(self, labnumber, **kw):
name = 'proc_InterpretedAgeHistoryTable'
table = self.__import_proctable(name)
labnumber = self.get_labnumber(labnumber)
t = table(identifier=labnumber.identifier, **kw)
labnumber.selected_interpreted_age = t
self._add_item(t)
return t
def add_interpreted_age(self, history, **kw):
return self._add_series_item('InterpretedAge', 'interpreted_age', history, **kw)
def add_interpreted_age_set(self, interpreted_age, analysis, **kw):
item = proc_InterpretedAgeSetTable(analysis=analysis,
interpreted_age_id=interpreted_age.id,
**kw)
return self._add_item(item)
# return self._add_set('InterpretedAge', 'interpreted_age',
# interpreted_age, analysis, **kw)
def add_blanks_history(self, analysis, **kw):
return self._add_history('Blanks', analysis, **kw)
def add_blanks(self, history, **kw):
return self._add_series_item('Blanks', 'blanks', history, **kw)
def add_blanks_set(self, analysis, **kw):
return self._add_set('Blanks', 'blank', analysis, **kw)
def add_blank_set_value_table(self, v, e, blank, analysis):
item = proc_BlanksSetValueTable(value=float(v), error=float(e))
dbitem = self._add_item(item)
dbitem.blank = blank
if isinstance(analysis, six.integer_types):
dbitem.analysis_id = analysis
else:
dbitem.analysis = analysis
return dbitem
and
returns them in form of a :class:`pandas.DataFrame` or a
:class:`numpy.ndarray`.
The mechanistic model is solved for the provided parameters and times,
and samples around this solution are drawn from the error models for
each time point.
The number of samples for each time point can be specified with
``n_samples``.
Parameters
----------
parameters
An array-like object with the parameter values of the predictive
model.
times
An array-like object with times at which the virtual "measurements"
are performed.
n_samples
The number of virtual "measurements" that are performed at each
time point. If ``None`` the biomarkers are measured only once
at each time point.
seed
A seed for the pseudo-random number generator or a
:class:`numpy.random.Generator`.
return_df
A boolean flag which determines whether the output is returned as a
:class:`pandas.DataFrame` or a :class:`numpy.ndarray`. If ``False``
the samples are returned as a numpy array of shape
``(n_outputs, n_times, n_samples)``.
include_regimen
A boolean flag which determines whether the dosing regimen
information is included in the output. If the samples are returned
as a :class:`numpy.ndarray`, the dosing information is not
included.
"""
parameters = np.asarray(parameters)
if len(parameters) != self._n_parameters:
raise ValueError(
'The length of parameters does not match n_parameters.')
# Sort times
times = np.sort(times)
# Sort parameters into mechanistic model params and error params
n_parameters = self._mechanistic_model.n_parameters()
mechanistic_params = parameters[:n_parameters]
error_params = parameters[n_parameters:]
# Solve mechanistic model
outputs = self._mechanistic_model.simulate(mechanistic_params, times)
# Create numpy container for samples
n_outputs = len(outputs)
n_times = len(times)
n_samples = n_samples if n_samples is not None else 1
container = np.empty(shape=(n_outputs, n_times, n_samples))
# Sample error around mechanistic model outputs
start_index = 0
for output_id, error_model in enumerate(self._error_models):
end_index = start_index + error_model.n_parameters()
# Sample
container[output_id, ...] = error_model.sample(
parameters=error_params[start_index:end_index],
model_output=outputs[output_id],
n_samples=n_samples,
seed=seed)
# Update start index
start_index = end_index
if return_df is False:
# Return samples in numpy array format
return container
# Structure samples in a pandas.DataFrame
output_names = self._mechanistic_model.outputs()
sample_ids = np.arange(start=1, stop=n_samples+1)
samples = pd.DataFrame(
columns=['ID', 'Biomarker', 'Time', 'Sample'])
# Fill in all samples at a specific time point at once
for output_id, name in enumerate(output_names):
for time_id, time in enumerate(times):
samples = samples.append(pd.DataFrame({
'ID': sample_ids,
'Time': time,
'Biomarker': name,
'Sample': container[output_id, time_id, :]}))
# Add dosing regimen information, if set
final_time = np.max(times)
regimen = self.get_dosing_regimen(final_time)
if (regimen is not None) and (include_regimen is True):
# Add dosing regimen for each sample
for _id in sample_ids:
regimen['ID'] = _id
samples = samples.append(regimen)
return samples
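        # Usage sketch (added comment, hypothetical values): for a model with one
        # output and three measurement times,
        #     predictive_model.sample(parameters, times=[1, 2, 3], n_samples=10,
        #                             return_df=False)
        # returns a numpy array of shape (1, 3, 10); with return_df=True the same
        # samples are returned as a long-format DataFrame with ID, Biomarker, Time
        # and Sample columns (plus dosing regimen rows when include_regimen is True
        # and a regimen has been set).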
def set_dosing_regimen(
self, dose, start, duration=0.01, period=None, num=None):
"""
Sets the dosing regimen with which the compound is administered.
By default the dose is administered as a bolus injection (duration on
a time scale that is 100 fold smaller than the basic time unit). To
model an infusion of the dose over a longer time period, the
``duration`` can be adjusted to the appropriate time scale.
By default the dose is administered once. To apply multiple doses
provide a dose administration period.
.. note::
This method requires a :class:`MechanisticModel` that supports
compound administration.
Parameters
----------
dose
The amount of the compound that is injected at each administration.
start
Start time of the treatment.
duration
Duration of dose administration. For a bolus injection, a dose
duration of 1% of the time unit should suffice. By default the
duration is set to 0.01 (bolus).
period
Periodicity at which doses are administered. If ``None`` the dose
is administered only once.
num
Number of administered doses. If ``None`` and the periodicity of
the administration is not ``None``, doses are administered
indefinitely.
"""
try:
self._mechanistic_model.set_dosing_regimen(
dose, start, duration, period, num)
except AttributeError:
# This error means that the mechanistic model is a
# PharmacodynamicModel and therefore no dosing regimen can be set.
raise AttributeError(
'The mechanistic model does not support to set dosing '
'regimens. This may be because the underlying '
'chi.MechanisticModel is a '
'chi.PharmacodynamicModel.')
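        # Usage sketch (added comment, hypothetical values):
        #     predictive_model.set_dosing_regimen(dose=1, start=0, period=24, num=5)
        # administers five bolus doses 24 time units apart starting at t=0; leaving
        # num=None while period is set repeats the dose indefinitely.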
class PredictivePopulationModel(PredictiveModel):
r"""
Implements a model that predicts the change of observable biomarkers over
time in a population of patients or model organisms.
This model takes an instance of a :class:`PredictiveModel`, and one
instance of a :class:`PopulationModel` for each predictive model
parameter.
    Formally, the distribution of measurable observables may be expressed as
.. math::
X \sim
\mathbb{P}\left( t; \psi _{\text{m}}, \psi _{\text{e}} \right)
\mathbb{P}\left(\psi _{\text{m}}, \psi _{\text{e}} | \theta \right),
    where :math:`X` is a measurable biomarker value at time :math:`t`.
:math:`\mathbb{P}\left( t; \psi _{\text{m}}, \psi _{\text{e}} \right)` is
the predictive model for an individual patient defined by the predictive
model, and
:math:`\mathbb{P}\left(\psi _{\text{m}}, \psi _{\text{e}} | \theta \right)`
is the population distribution with parameters :math:`\theta`.
Extends :class:`PredictiveModel`.
Parameters
----------
predictive_model
An instance of a :class:`PredictiveModel`.
population_models
A list of :class:`PopulationModel` instances, one for each predictive
model parameter.
params
A list of the model parameters, which maps the population models to the
predictive model parameters. If ``None``, the population models are
assumed to be listed in the same order as the model parameters.
"""
def __init__(self, predictive_model, population_models, params=None):
# Check inputs
if not isinstance(predictive_model, chi.PredictiveModel):
raise TypeError(
'The predictive model has to be an instance of a '
'chi.PredictiveModel.')
for pop_model in population_models:
if not isinstance(pop_model, chi.PopulationModel):
raise TypeError(
'All population models have to be instances of a '
'chi.PopulationModel.')
        # Get the number and names of the predictive model parameters
n_parameters = predictive_model.n_parameters()
parameter_names = predictive_model.get_parameter_names()
# Check that there is one population model for each model parameter
if len(population_models) != n_parameters:
raise ValueError(
'One population model has to be provided for each of the '
'<' + str(n_parameters) + '> mechanistic model-error '
'model parameters.')
# Sort population models according to model parameters
if params is not None:
# Check that params has the right length, and the correct
# parameter names
if len(params) != n_parameters:
raise ValueError(
'Params does not have the correct length. Params is '
'expected to contain all model parameter names.')
if sorted(parameter_names) != sorted(params):
raise ValueError(
'The parameter names in <params> have to coincide with '
'the parameter names of the non-populational predictive '
'model parameters <' + str(parameter_names) + '>.')
# Sort population models
pop_models = ['Place holder'] * n_parameters
for param_id, name in enumerate(params):
index = parameter_names.index(name)
pop_models[index] = population_models[param_id]
population_models = pop_models
# Remember predictive model and population models
self._predictive_model = predictive_model
self._population_models = [
copy.copy(pop_model) for pop_model in population_models]
# Set number and names of model parameters
self._set_population_parameter_names()
self._set_number_and_parameter_names()
def _set_population_parameter_names(self):
"""
Sets the names of the population model parameters.
For chi.HeterogeneousModel the bottom-level parameter is used
as model parameter name.
"""
# Get predictive model parameters
bottom_parameter_names = self._predictive_model.get_parameter_names()
# Construct model parameter names
for param_id, pop_model in enumerate(self._population_models):
# Get original population parameters
pop_model.set_parameter_names(None)
pop_params = pop_model.get_parameter_names()
# Construct parameter names
bottom_name = bottom_parameter_names[param_id]
if pop_params is not None:
names = [name + ' ' + bottom_name for name in pop_params]
else:
# If the population model has no parameter names,
# we keep the bottom name
names = [bottom_name]
# Update population model parameter names
pop_model.set_parameter_names(names)
def _set_number_and_parameter_names(self):
"""
Updates the number and names of the free model parameters.
"""
# Construct model parameter names
parameter_names = []
for pop_model in self._population_models:
# Get population parameter
pop_params = pop_model.get_parameter_names()
parameter_names += pop_params
# Update number and names
self._parameter_names = parameter_names
self._n_parameters = len(self._parameter_names)
def fix_parameters(self, name_value_dict):
"""
Fixes the value of model parameters, and effectively removes them as a
parameter from the model. Fixing the value of a parameter at ``None``,
sets the parameter free again.
.. note:
Parameters modelled by a :class:`HeterogeneousModel` cannot be
fixed on the population level. If you would like to fix the
associated parameter, fix it in the corresponding
:class:`PredictiveModel`.
Parameters
----------
name_value_dict
A dictionary with model parameter names as keys, and parameter
values as values.
"""
        # Check type of dictionary
try:
name_value_dict = dict(name_value_dict)
except (TypeError, ValueError):
raise ValueError(
                'The name-value
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional
from xsdata.models.datatype import XmlDateTime, XmlDuration
from siri.siri.siri_requests_v2_0 import (
AbstractCapabilitiesStructure,
AbstractFunctionalServiceRequestStructure,
AbstractIdentifiedItemStructure,
AbstractReferencingItemStructure,
AbstractServiceCapabilitiesResponseStructure,
AbstractServiceDeliveryStructure,
AbstractSubscriptionStructure,
CapabilityRequestPolicyStructure,
CapabilitySubscriptionPolicyStructure,
ServiceCapabilitiesRequestStructure,
)
from siri.siri_model.siri_journey_support_v2_0 import FramedVehicleJourneyRefStructure
from siri.siri_model.siri_journey_v2_0 import ProgressBetweenStopsStructure
from siri.siri_model.siri_model_permissions_v2_0 import (
LinePermissions,
OperatorPermissions,
PermissionsStructure,
)
from siri.siri_model.siri_monitored_vehicle_journey_v2_0 import MonitoredVehicleJourneyStructure
from siri.siri_model.siri_reference_v2_0 import (
PublishedLineName,
VehicleModesEnumeration,
)
from siri.siri_utility.siri_permissions_v2_0 import (
AbstractPermissionStructure,
AbstractTopicPermissionStructure,
CapabilityAccessControlStructure,
)
from siri.siri_utility.siri_types_v2_0 import NaturalLanguageStringStructure
from siri.siri_utility.siri_utility_v1_1 import Extensions
__NAMESPACE__ = "http://www.siri.org.uk/siri"
class VehicleMonitoringDetailEnumeration(Enum):
"""
Detail Levels for Request.
:cvar MINIMUM: Return only the minimum amount of optional data for
each stop event to provide a display, A time, line name and
destination name.
:cvar BASIC: Return minimum and other available basic details for
each stop event. Do not include data on time at next stop or
destination.
:cvar NORMAL: Return all basic data, and also arrival times at
DESTINATION.
:cvar CALLS: Return all available data for each stop event,
including previous and onward CALLs with passing times for
JOURNEY PATTERN.
"""
MINIMUM = "minimum"
BASIC = "basic"
NORMAL = "normal"
CALLS = "calls"
@dataclass
class VehicleActivityCancellationStructure(AbstractReferencingItemStructure):
"""
Type for cancellation of an earlier Vehicle Activity.
:ivar vehicle_monitoring_ref:
:ivar vehicle_journey_ref: Reference to VEHICLE JOURNEY that VEHICLE
is making.
:ivar line_ref: Reference to a LINE.
    :ivar direction_ref: Reference to a DIRECTION of the LINE,
typically outward or return.
:ivar journey_pattern_ref: Identifier of JOURNEY PATTERN that
journey follows.
    :ivar journey_pattern_name: Name of JOURNEY PATTERN
:ivar vehicle_mode: A means of transportation such as bus, rail,
etc.
:ivar route_ref: Identifier of ROUTE that journey follows.
:ivar published_line_name: Name or Number by which the LINE is known
to the public. (Unbounded since SIRI 2.0)
:ivar group_of_lines_ref: Reference to a GROUP OF LINEs to which
journey belongs. SIRI 2.0
:ivar direction_name: Description of the DIRECTION. May correspond
to a DESTINATION DISPLAY. (Unbounded since SIRI 2.0)
:ivar external_line_ref: Alternative identifier of LINE that an
external system may associate with journey.
:ivar reason: Reason for cancellation. (Unbounded since SIRI 2.0)
:ivar extensions:
"""
vehicle_monitoring_ref: Optional[str] = field(
default=None,
metadata={
"name": "VehicleMonitoringRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
vehicle_journey_ref: Optional[FramedVehicleJourneyRefStructure] = field(
default=None,
metadata={
"name": "VehicleJourneyRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
line_ref: Optional[str] = field(
default=None,
metadata={
"name": "LineRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
direction_ref: Optional[str] = field(
default=None,
metadata={
"name": "DirectionRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
journey_pattern_ref: Optional[str] = field(
default=None,
metadata={
"name": "JourneyPatternRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
journey_pattern_name: Optional[NaturalLanguageStringStructure] = field(
default=None,
metadata={
"name": "JourneyPatternName",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
vehicle_mode: List[VehicleModesEnumeration] = field(
default_factory=list,
metadata={
"name": "VehicleMode",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
route_ref: Optional[str] = field(
default=None,
metadata={
"name": "RouteRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
published_line_name: List[PublishedLineName] = field(
default_factory=list,
metadata={
"name": "PublishedLineName",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
group_of_lines_ref: Optional[str] = field(
default=None,
metadata={
"name": "GroupOfLinesRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
direction_name: List[NaturalLanguageStringStructure] = field(
default_factory=list,
metadata={
"name": "DirectionName",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
external_line_ref: Optional[str] = field(
default=None,
metadata={
"name": "ExternalLineRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
reason: List[NaturalLanguageStringStructure] = field(
default_factory=list,
metadata={
"name": "Reason",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
extensions: Optional[Extensions] = field(
default=None,
metadata={
"name": "Extensions",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
@dataclass
class VehicleActivityStructure(AbstractIdentifiedItemStructure):
"""
Type for a Vehicle Activity.
:ivar valid_until_time: Time until when data is valid.
:ivar vehicle_monitoring_ref: Reference to monitored VEHICLE or
GROUP OF VEHICLEs.
:ivar monitoring_name: Name associated with Monitoring Reference.
        Supports SIRI LITE services (+SIRI v2.0).
:ivar progress_between_stops: Provides information about the
        progress of the VEHICLE along its current link, that is, the link
        from the previously visited stop to the current position.
:ivar monitored_vehicle_journey: Monitored VEHICLE JOURNEY that
VEHICLE is following.
:ivar vehicle_activity_note: Text associated with Delivery.
:ivar extensions:
"""
valid_until_time: Optional[XmlDateTime] = field(
default=None,
metadata={
"name": "ValidUntilTime",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
"required": True,
},
)
vehicle_monitoring_ref: Optional[str] = field(
default=None,
metadata={
"name": "VehicleMonitoringRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
monitoring_name: List[NaturalLanguageStringStructure] = field(
default_factory=list,
metadata={
"name": "MonitoringName",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
progress_between_stops: Optional[ProgressBetweenStopsStructure] = field(
default=None,
metadata={
"name": "ProgressBetweenStops",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
monitored_vehicle_journey: Optional[MonitoredVehicleJourneyStructure] = field(
default=None,
metadata={
"name": "MonitoredVehicleJourney",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
"required": True,
},
)
vehicle_activity_note: List[NaturalLanguageStringStructure] = field(
default_factory=list,
metadata={
"name": "VehicleActivityNote",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
extensions: Optional[Extensions] = field(
default=None,
metadata={
"name": "Extensions",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
@dataclass
class VehicleMonitorPermissionStructure(AbstractTopicPermissionStructure):
"""
Type for MonitoringPoint Permission.
:ivar vehicle_monitoring_ref: Vehicle Monitoring reference for which
permission is made.
"""
vehicle_monitoring_ref: Optional[str] = field(
default=None,
metadata={
"name": "VehicleMonitoringRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
"required": True,
},
)
@dataclass
class VehicleMonitoringCapabilitiesRequest(ServiceCapabilitiesRequestStructure):
"""Request for information about Vehicle Monitoring Service Capabilities.
    Answered with a VehicleMonitoringCapabilitiesResponse.
"""
class Meta:
namespace = "http://www.siri.org.uk/siri"
@dataclass
class VehicleMonitoringCapabilityRequestPolicyStructure(CapabilityRequestPolicyStructure):
"""
Type for capability request policy.
:ivar has_references: Whether results should return references.
    :ivar has_names: Whether results should return names.
"""
has_references: Optional[bool] = field(
default=None,
metadata={
"name": "HasReferences",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
has_names: Optional[bool] = field(
default=None,
metadata={
"name": "HasNames",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
@dataclass
class VehicleMonitoringRequestStructure(AbstractFunctionalServiceRequestStructure):
"""
Type for Functional Service Request for Vehicle Monitoring Service.
:ivar vehicle_monitoring_ref: A predefined scope for making VEHICLE
requests.
:ivar vehicle_ref: Reference to a specific VEHICLE about which data
is requested.
:ivar line_ref: Filter the results to include only vehicles for the
specific LINE.
:ivar direction_ref: Filter the results to include only VEHICLEs
going to this DIRECTION.
:ivar language: Preferred language in which to return text values.
:ivar include_translations:
:ivar maximum_vehicles: The maximum number of MONITORED VEHICLE
JOURNEYs to include in a given delivery. The most recent n
Events within the look ahead window are included.
:ivar vehicle_monitoring_detail_level: Level of detail to include in
response.
:ivar maximum_number_of_calls: If calls are to be returned, maximum
number of calls to include in response. If absent, exclude all
calls. +SIRI v2.0.
:ivar include_situations: Whether any related Situations should be
included in the ServiceDelivery. Default is 'false'. +SIRI v2.0
:ivar extensions:
:ivar version: Version number of request. Fixed
"""
vehicle_monitoring_ref: Optional[str] = field(
default=None,
metadata={
"name": "VehicleMonitoringRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
vehicle_ref: Optional[str] = field(
default=None,
metadata={
"name": "VehicleRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
line_ref: Optional[str] = field(
default=None,
metadata={
"name": "LineRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
direction_ref: Optional[str] = field(
default=None,
metadata={
"name": "DirectionRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
language: Optional[str] = field(
default=None,
metadata={
"name": "Language",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
include_translations: Optional[bool] = field(
default=None,
metadata={
"name": "IncludeTranslations",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
maximum_vehicles: Optional[int] = field(
default=None,
metadata={
"name": "MaximumVehicles",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
vehicle_monitoring_detail_level: Optional[VehicleMonitoringDetailEnumeration] = field(
default=None,
metadata={
"name": "VehicleMonitoringDetailLevel",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
maximum_number_of_calls: Optional["VehicleMonitoringRequestStructure.MaximumNumberOfCalls"] = field(
default=None,
metadata={
"name": "MaximumNumberOfCalls",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
include_situations: Optional[bool] = field(
default=None,
metadata={
"name": "IncludeSituations",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
extensions: Optional[Extensions] = field(
default=None,
metadata={
"name": "Extensions",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
version: str = field(
default="2.0",
metadata={
"type": "Attribute",
},
)
@dataclass
class MaximumNumberOfCalls:
"""
:ivar previous: Maximum number of previous calls to include.
Only applies if VehicleMonitoringDetailLevel of Calls
specified. Zero for none. If VehicleMonitoringDetailLevel of
Calls specified but MaximumNumberOfCalls.Previous absent,
include all previous calls. +SIRI v2.0.
:ivar onwards: Maximum number of onwards calls to include. Zero
for none. Only applies if VehicleMonitoringDetailLevel of
'calls' specified. Zero for none. If
VehicleMonitoringDetailLevel calls specified but
MaximumNumberOfCalls.Onwards absent, include all onwards
calls. +SIRI v2.0.
"""
previous: Optional[int] = field(
default=None,
metadata={
"name": "Previous",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
onwards: Optional[int] = field(
default=None,
metadata={
"name": "Onwards",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
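# Illustration (added for this excerpt, not generated from the schema): building a
# request and rendering it with xsdata's XmlSerializer. This assumes the inherited
# request fields all have defaults, as is usual for xsdata-generated bindings; the
# reference values are placeholders.
def _example_vehicle_monitoring_request() -> str:
    from xsdata.formats.dataclass.serializers import XmlSerializer

    request = VehicleMonitoringRequestStructure(
        line_ref="Line:42",
        maximum_vehicles=10,
        vehicle_monitoring_detail_level=VehicleMonitoringDetailEnumeration.NORMAL,
    )
    return XmlSerializer().render(request)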
@dataclass
class VehicleMonitoringServiceCapabilitiesStructure(AbstractCapabilitiesStructure):
"""
Type for Vehicle Monitoring Capabilities.
:ivar topic_filtering: Topic Filtering Capabilities.
:ivar request_policy: Request Policy capabilities.
:ivar subscription_policy: Subscription Policy capabilities.
:ivar access_control: Optional Access control capabilities.
:ivar response_features: Optional Response capabilities.
:ivar extensions:
"""
topic_filtering: Optional["VehicleMonitoringServiceCapabilitiesStructure.TopicFiltering"] = field(
default=None,
metadata={
"name": "TopicFiltering",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
request_policy: Optional["VehicleMonitoringServiceCapabilitiesStructure.RequestPolicy"] = field(
default=None,
metadata={
"name": "RequestPolicy",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
    subscription_policy: Optional[CapabilitySubscriptionPolicyStructure] =
import itertools
from collections import defaultdict, deque
from .common import fail, warn
from .dataflow import DFNode
from .errors import Errors, Warnings
from .graph import Graph
from .latency import get_latency
from .irvisitor import IRVisitor
from .ir import *
from .irhelper import has_exclusive_function
from .latency import CALL_MINIMUM_STEP
from .utils import unique
from .scope import Scope
from logging import getLogger
logger = getLogger(__name__)
MAX_FUNC_UNIT = 100
class Scheduler(object):
def __init__(self):
self.done_blocks = []
def schedule(self, scope):
if scope.is_namespace() or scope.is_class() or scope.is_lib():
return
self.scope = scope
for dfg in self.scope.dfgs(bottom_up=True):
if dfg.parent and dfg.synth_params['scheduling'] == 'pipeline':
scheduler_impl = PipelineScheduler()
else:
scheduler_impl = BlockBoundedListScheduler()
scheduler_impl.schedule(scope, dfg)
class SchedulerImpl(object):
def __init__(self):
self.res_tables = {}
self.node_latency_map = {} # {node:(max, min, actual)}
self.node_seq_latency_map = {}
self.all_paths = []
self.res_extractor = None
def schedule(self, scope, dfg):
self.scope = scope
logger.log(0, '_schedule dfg')
sources = dfg.find_src()
for src in sources:
src.priority = -1
self.res_extractor = ResourceExtractor()
for node in sorted(dfg.traverse_nodes(dfg.succs, sources, [])):
self.res_extractor.current_node = node
self.res_extractor.visit(node.tag)
worklist = deque()
worklist.append((sources, 0))
while worklist:
nodes, prio = worklist.popleft()
for n in nodes:
succs, nextprio = self._set_priority(n, prio, dfg)
if succs:
succs = unique(succs)
worklist.append((succs, nextprio))
longest_latency = self._schedule(dfg)
if longest_latency > CALL_MINIMUM_STEP:
scope.asap_latency = longest_latency
else:
scope.asap_latency = CALL_MINIMUM_STEP
def _set_priority(self, node, prio, dfg):
if prio > node.priority:
node.priority = prio
logger.debug('update priority ... ' + str(node))
return (dfg.succs_without_back(node), prio + 1)
return (None, None)
def _node_sched_default(self, dfg, node):
preds = dfg.preds_without_back(node)
if preds:
defuse_preds = dfg.preds_typ_without_back(node, 'DefUse')
usedef_preds = dfg.preds_typ_without_back(node, 'UseDef')
seq_preds = dfg.preds_typ_without_back(node, 'Seq')
sched_times = []
if seq_preds:
latest_node = max(seq_preds, key=lambda p: p.end)
sched_times.append(latest_node.end)
if defuse_preds:
latest_node = max(defuse_preds, key=lambda p: p.end)
sched_times.append(latest_node.end)
if usedef_preds:
preds = usedef_preds
latest_node = max(preds, key=lambda p: p.begin)
sched_times.append(latest_node.begin)
if not sched_times:
latest_node = max(preds, key=lambda p: p.begin)
sched_times.append(latest_node.begin)
scheduled_time = max(sched_times)
if scheduled_time < 0:
scheduled_time = 0
else:
# source node
scheduled_time = 0
return scheduled_time
def _find_latest_alias(self, dfg, node):
stm = node.tag
if not stm.is_a([MOVE, PHIBase]):
return node
var = node.tag.dst.symbol() if node.tag.is_a(MOVE) else node.tag.var.symbol()
if not var.is_alias():
return node
succs = dfg.succs_typ_without_back(node, 'DefUse')
if not succs:
return node
nodes = [self._find_latest_alias(dfg, s) for s in succs]
latest_node = max(nodes, key=lambda p: p.end)
return latest_node
def _is_resource_full(self, res, scheduled_resources):
# TODO: Limiting resources by scheduler is a future task
#if isinstance(res, str):
# return len(scheduled_resources) >= MAX_FUNC_UNIT
#elif isinstance(res, Scope):
# return len(scheduled_resources) >= MAX_FUNC_UNIT
return 0
def _str_res(self, res):
if isinstance(res, str):
return res
elif isinstance(res, Scope):
return res.name
def _get_earliest_res_free_time(self, node, time, latency):
resources = self.res_extractor.ops[node].keys()
#TODO operator chaining?
#logger.debug(node)
#logger.debug(resources)
assert len(resources) <= 1
if resources:
res = list(resources)[0]
if res not in self.res_tables:
table = defaultdict(list)
self.res_tables[res] = table
else:
table = self.res_tables[res]
scheduled_resources = table[time]
if node in scheduled_resources:
#already scheduled
return time
while self._is_resource_full(res, scheduled_resources):
logger.debug("!!! resource {}'s slot '{}' is full !!!".
format(self._str_res(res), time))
assert False, 'Rescheduling due to lack of resources is not supported yet'
time += 1
scheduled_resources = table[time]
node.instance_num = len(scheduled_resources)
#logger.debug("{} is scheduled to {}, instance_num {}".
# format(node, time, node.instance_num))
# fill scheduled_resources table
n = latency if latency != 0 else 1
for i in range(n):
scheduled_resources = table[time + i]
scheduled_resources.append(node)
return time
def _calc_latency(self, dfg):
is_minimum = dfg.synth_params['cycle'] == 'minimum'
for node in dfg.get_priority_ordered_nodes():
def_l, seq_l = get_latency(node.tag)
if def_l == 0:
if is_minimum:
self.node_latency_map[node] = (0, 0, 0)
else:
if node.tag.is_a([MOVE, PHIBase]):
var = node.tag.dst.symbol() if node.tag.is_a(MOVE) else node.tag.var.symbol()
if var.is_condition():
self.node_latency_map[node] = (0, 0, 0)
else:
self.node_latency_map[node] = (1, 0, 1)
else:
self.node_latency_map[node] = (0, 0, 0)
else:
self.node_latency_map[node] = (def_l, def_l, def_l)
self.node_seq_latency_map[node] = seq_l
def _adjust_latency(self, paths, expected):
for path in paths:
path_latencies = []
for n in path:
m, _, _ = self.node_latency_map[n]
path_latencies.append(m)
path_latency = sum(path_latencies)
if expected > path_latency:
# we don't have to adjust latency
continue
diff = path_latency - expected
fixed = set()
succeeded = True
# try to reduce latency
while diff:
for i, n in enumerate(path):
if n in fixed:
continue
max_l, min_l, _ = self.node_latency_map[n]
if min_l < path_latencies[i]:
path_latencies[i] -= 1
self.node_latency_map[n] = (max_l, min_l, path_latencies[i])
diff -= 1
else:
fixed.add(n)
if len(fixed) == len(path):
# scheduling has failed
succeeded = False
break
if not succeeded:
return False, expected + diff
return True, expected
def _try_adjust_latency(self, dfg, expected):
for path in dfg.trace_all_paths(lambda n: dfg.succs_typ_without_back(n, 'DefUse')):
self.all_paths.append(path)
ret, actual = self._adjust_latency(self.all_paths, expected)
if not ret:
            assert False, 'scheduling has failed. the cycle must be greater than or equal to {}'.format(actual)
def _max_latency(self, paths):
max_latency = 0
for path in paths:
path_latencies = []
for n in path:
m, _, _ = self.node_latency_map[n]
path_latencies.append(m)
path_latency = sum(path_latencies)
if path_latency > max_latency:
max_latency = path_latency
return max_latency
def _remove_alias_if_needed(self, dfg):
for n in dfg.nodes:
if n not in self.node_latency_map:
continue
_, min_l, actual = self.node_latency_map[n]
if min_l == 0 and actual > 0:
for d in n.defs:
if d.is_alias():
d.del_tag('alias')
def _group_nodes_by_block(self, dfg):
block_nodes = defaultdict(list)
for node in dfg.get_priority_ordered_nodes():
block_nodes[node.tag.block].append(node)
return block_nodes
def _schedule_cycles(self, dfg):
self._calc_latency(dfg)
synth_cycle = dfg.synth_params['cycle']
if synth_cycle == 'any' or synth_cycle == 'minimum':
pass
elif synth_cycle.startswith('less:'):
            expected_latency = int(synth_cycle[len('less:'):])
            self._try_adjust_latency(dfg, expected_latency)
elif synth_cycle.startswith('greater:'):
            assert False, 'Not Implemented Yet'
else:
assert False
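# Added helper (illustration only, not used by the schedulers below): how the
# synth_params['cycle'] strings handled in SchedulerImpl._schedule_cycles are
# interpreted. 'any' and 'minimum' impose no extra constraint here ('minimum'
# instead changes how zero-latency moves are counted in _calc_latency), 'less:N'
# asks _try_adjust_latency to squeeze every path down to at most N cycles, and
# 'greater:N' is not implemented yet.
def _describe_cycle_constraint(synth_cycle):
    if synth_cycle in ('any', 'minimum'):
        return (synth_cycle, None)
    elif synth_cycle.startswith('less:'):
        return ('less', int(synth_cycle[len('less:'):]))
    elif synth_cycle.startswith('greater:'):
        return ('greater', int(synth_cycle[len('greater:'):]))
    raise ValueError('unknown cycle constraint: {}'.format(synth_cycle))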
class BlockBoundedListScheduler(SchedulerImpl):
def _schedule(self, dfg):
self._schedule_cycles(dfg)
self._remove_alias_if_needed(dfg)
block_nodes = self._group_nodes_by_block(dfg)
longest_latency = 0
for block, nodes in block_nodes.items():
#latency = self._list_schedule(dfg, nodes)
latency = self._list_schedule_with_block_bound(dfg, nodes, block, 0)
if longest_latency < latency:
longest_latency = latency
return longest_latency
def _list_schedule(self, dfg, nodes):
while True:
next_candidates = set()
latency = 0
for n in sorted(nodes, key=lambda n: (n.priority, n.stm_index)):
scheduled_time = self._node_sched(dfg, n)
latency = get_latency(n.tag)
#detect resource conflict
scheduled_time = self._get_earliest_res_free_time(n, scheduled_time, latency)
n.begin = scheduled_time
n.end = n.begin + latency
#logger.debug('## SCHEDULED ## ' + str(n))
succs = dfg.succs_without_back(n)
next_candidates = next_candidates.union(succs)
latency = n.end
if next_candidates:
nodes = next_candidates
else:
break
return latency
def _list_schedule_with_block_bound(self, dfg, nodes, block, longest_latency):
while True:
next_candidates = set()
for n in sorted(nodes, key=lambda n: (n.priority, n.stm_index)):
if n.tag.block is not block:
continue
scheduled_time = self._node_sched_with_block_bound(dfg, n, block)
_, _, latency = self.node_latency_map[n]
#detect resource conflict
scheduled_time = self._get_earliest_res_free_time(n, scheduled_time, latency)
n.begin = scheduled_time
n.end = n.begin + latency
#logger.debug('## SCHEDULED ## ' + str(n))
succs = dfg.succs_without_back(n)
next_candidates = next_candidates.union(succs)
if longest_latency < n.end:
longest_latency = n.end
if next_candidates:
nodes = next_candidates
else:
break
return longest_latency
def _node_sched_with_block_bound(self, dfg, node, block):
preds = dfg.preds_without_back(node)
preds = [p for p in preds if p.tag.block is block]
logger.debug('scheduling for ' + str(node))
if preds:
defuse_preds = dfg.preds_typ_without_back(node, 'DefUse')
defuse_preds = [p for p in defuse_preds if p.tag.block is block]
usedef_preds = dfg.preds_typ_without_back(node, 'UseDef')
usedef_preds = [p for p in usedef_preds if p.tag.block is block]
seq_preds = dfg.preds_typ_without_back(node, 'Seq')
seq_preds = [p for p in seq_preds if p.tag.block is block]
sched_times = []
if seq_preds:
if node.tag.is_a([JUMP, CJUMP, MCJUMP]) or has_exclusive_function(node.tag):
latest_node = max(seq_preds, key=lambda p: p.end)
sched_time = latest_node.end
else:
latest_node = max(seq_preds, key=lambda p: (p.begin, p.end))
seq_latency = self.node_seq_latency_map[latest_node]
sched_time = latest_node.begin + seq_latency
sched_times.append(sched_time)
logger.debug('latest_node of seq_preds ' + str(latest_node))
logger.debug('schedtime ' + str(sched_time))
if defuse_preds:
latest_node = max(defuse_preds, key=lambda p: p.end)
logger.debug('latest_node of defuse_preds ' + str(latest_node))
sched_times.append(latest_node.end)
logger.debug('schedtime ' + str(latest_node.end))
if usedef_preds:
preds = [self._find_latest_alias(dfg, pred) for pred in usedef_preds]
latest_node = max(preds, key=lambda p: p.begin)
logger.debug('latest_node(begin) of usedef_preds ' + str(latest_node))
sched_times.append(latest_node.begin)
logger.debug('schedtime ' + str(latest_node.begin))
if not sched_times:
latest_node = max(preds, key=lambda p: p.begin)
sched_times.append(latest_node.begin)
scheduled_time = max(sched_times)
if scheduled_time < 0:
scheduled_time = 0
else:
# source node
scheduled_time = 0
return scheduled_time
class PipelineScheduler(SchedulerImpl):
def _schedule(self, dfg):
self._schedule_cycles(dfg)
self._schedule_ii(dfg)
self._remove_alias_if_needed(dfg)
self.d2c = {}
block_nodes = self._group_nodes_by_block(dfg)
longest_latency = 0
for block, nodes in block_nodes.items():
latency = self._list_schedule_for_pipeline(dfg, nodes, 0)
conflict_res_table = self._make_conflict_res_table(nodes)
if conflict_res_table:
logger.debug('before rescheduling')
for n in dfg.get_scheduled_nodes():
logger.debug(n)
latency = self._reschedule_for_conflict(dfg, conflict_res_table, latency)
if longest_latency < latency:
longest_latency = latency
self._fill_defuse_gap(dfg, nodes)
logger.debug('II = ' + str(dfg.ii))
return longest_latency
def _make_conflict_res_table(self, nodes):
conflict_res_table = defaultdict(list)
self._extend_conflict_res_table(conflict_res_table, nodes, self.res_extractor.mems)
self._extend_conflict_res_table(conflict_res_table, nodes, self.res_extractor.ports)
self._extend_conflict_res_table(conflict_res_table, nodes, self.res_extractor.regarrays)
return conflict_res_table
    def _extend_conflict_res_table(self, table,
# coding: utf8
########################################################################
# #
# Control law : tau = P(q*-q^) + D(v*-v^) + tau_ff #
# #
########################################################################
from matplotlib import pyplot as plt
import pinocchio as pin
import numpy as np
import numpy.matlib as matlib
import tsid
import FootTrajectoryGenerator as ftg
import FootstepPlanner
import pybullet as pyb
import utils
import time
pin.switchToNumpyMatrix()
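# Added sketch (illustration only, not used by the controller below): the control
# law from the banner, tau = P (q* - q^) + D (v* - v^) + tau_ff, written out with
# numpy. The default gains are placeholders, not the ones used by this controller.
def _pd_feedforward_sketch(q_des, q_mes, v_des, v_mes, tau_ff, P=3.0, D=0.2):
    return P * (q_des - q_mes) + D * (v_des - v_mes) + tau_ff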
########################################################################
# Class for a PD with feed-forward Controller #
########################################################################
class controller:
""" Inverse Dynamics controller that take into account the dynamics of the quadruped to generate
actuator torques to apply on the ground the contact forces computed by the MPC (for feet in stance
phase) and to perform the desired footsteps (for feet in swing phase)
Args:
N_similation (int): maximum number of Inverse Dynamics iterations for the simulation
"""
def __init__(self, N_simulation, k_mpc, n_periods):
self.q_ref = np.array([[0.0, 0.0, 0.2027682, 0.0, 0.0, 0.0, 1.0,
0.0, 0.8, -1.6, 0, 0.8, -1.6,
0, -0.8, 1.6, 0, -0.8, 1.6]]).transpose()
self.qtsid = self.q_ref.copy()
self.vtsid = np.zeros((18, 1))
self.ades = np.zeros((18, 1))
self.error = False
self.verbose = True
# List with the names of all feet frames
self.foot_frames = ['FL_FOOT', 'FR_FOOT', 'HL_FOOT', 'HR_FOOT']
# Constraining the contacts
mu = 0.9 # friction coefficient
fMin = 1.0 # minimum normal force
fMax = 25.0 # maximum normal force
contactNormal = np.matrix([0., 0., 1.]).T # direction of the normal to the contact surface
# Coefficients of the posture task
kp_posture = 10.0 # proportional gain of the posture task
w_posture = 1.0 # weight of the posture task
# Coefficients of the contact tasks
kp_contact = 100.0 # proportional gain for the contacts
self.w_forceRef = 50.0 # weight of the forces regularization
self.w_reg_f = 50.0
# Coefficients of the foot tracking task
kp_foot = 100.0 # proportional gain for the tracking task
self.w_foot = 500.0 # weight of the tracking task
# Arrays to store logs
k_max_loop = N_simulation
self.f_pos = np.zeros((4, k_max_loop, 3))
self.f_vel = np.zeros((4, k_max_loop, 3))
self.f_acc = np.zeros((4, k_max_loop, 3))
self.f_pos_ref = np.zeros((4, k_max_loop, 3))
self.f_vel_ref = np.zeros((4, k_max_loop, 3))
self.f_acc_ref = np.zeros((4, k_max_loop, 3))
self.b_pos = np.zeros((k_max_loop, 6))
self.b_vel = np.zeros((k_max_loop, 6))
self.com_pos = np.zeros((k_max_loop, 3))
self.com_pos_ref = np.zeros((k_max_loop, 3))
self.c_forces = np.zeros((4, k_max_loop, 3))
self.h_ref_feet = np.zeros((k_max_loop, ))
self.goals = np.zeros((3, 4))
self.vgoals = np.zeros((3, 4))
self.agoals = np.zeros((3, 4))
self.mgoals = np.zeros((6, 4))
# Position of the shoulders in local frame
self.shoulders = np.array([[0.19, 0.19, -0.19, -0.19], [0.15005, -0.15005, 0.15005, -0.15005]])
self.footsteps = self.shoulders.copy()
self.memory_contacts = self.shoulders.copy()
# Foot trajectory generator
max_height_feet = 0.05
t_lock_before_touchdown = 0.05
self.ftgs = [ftg.Foot_trajectory_generator(max_height_feet, t_lock_before_touchdown) for i in range(4)]
# Which pair of feet is active (0 for [1, 2] and 1 for [0, 3])
self.pair = -1
# Number of TSID steps for 1 step of the MPC
self.k_mpc = k_mpc
# For update_feet_tasks function
self.dt = 0.001 # [s], time step
self.t1 = 0.14 # [s], duration of swing phase
# Rotation matrix
self.R = np.eye(3)
# Feedforward torques
self.tau_ff = np.zeros((12, 1))
# Torques sent to the robot
self.torques12 = np.zeros((12, 1))
self.tau = np.zeros((12, ))
self.ID_base = None # ID of base link
self.ID_feet = [None] * 4 # ID of feet links
# Footstep planner object
# self.fstep_planner = FootstepPlanner.FootstepPlanner(0.001, 32)
self.vu_m = np.zeros((6, 1))
self.t_stance = 0.16
self.T_gait = 0.32
self.n_periods = n_periods
self.h_ref = 0.235 - 0.01205385
self.t_swing = np.zeros((4, )) # Total duration of current swing phase for each foot
self.contacts_order = [0, 1, 2, 3]
# Parameter to enable/disable hybrid control
self.enable_hybrid_control = False
# Time since the start of the simulation
self.t = 0.0
########################################################################
# Definition of the Model and TSID problem #
########################################################################
# Set the paths where the urdf and srdf file of the robot are registered
modelPath = "/opt/openrobots/share/example-robot-data/robots"
urdf = modelPath + "/solo_description/robots/solo12.urdf"
srdf = modelPath + "/solo_description/srdf/solo.srdf"
vector = pin.StdVec_StdString()
vector.extend(item for item in modelPath)
# Create the robot wrapper from the urdf model (which has no free flyer) and add a free flyer
self.robot = tsid.RobotWrapper(urdf, vector, pin.JointModelFreeFlyer(), False)
self.model = self.robot.model()
# Creation of the Inverse Dynamics HQP problem using the robot
# accelerations (base + joints) and the contact forces
self.invdyn = tsid.InverseDynamicsFormulationAccForce("tsid", self.robot, False)
# Compute the problem data with a solver based on EiQuadProg
t = 0.0
self.invdyn.computeProblemData(t, self.qtsid, self.vtsid)
# Saving IDs for later
self.ID_base = self.model.getFrameId("base_link")
for i, name in enumerate(self.foot_frames):
self.ID_feet[i] = self.model.getFrameId(name)
# Store a frame object to avoid creating one each time
self.pos_foot = self.robot.framePosition(self.invdyn.data(), self.ID_feet[0])
#####################
# LEGS POSTURE TASK #
#####################
# Task definition (creating the task object)
self.postureTask = tsid.TaskJointPosture("task-posture", self.robot)
self.postureTask.setKp(kp_posture * matlib.ones(self.robot.nv-6).T) # Proportional gain
self.postureTask.setKd(2.0 * np.sqrt(kp_posture) * matlib.ones(self.robot.nv-6).T) # Derivative gain
# Add the task to the HQP with weight = w_posture, priority level = 1 (not real constraint)
# and a transition duration = 0.0
self.invdyn.addMotionTask(self.postureTask, w_posture, 1, 0.0)
# TSID Trajectory (creating the trajectory object and linking it to the task)
pin.loadReferenceConfigurations(self.model, srdf, False)
self.trajPosture = tsid.TrajectoryEuclidianConstant("traj_joint", self.q_ref[7:])
self.samplePosture = self.trajPosture.computeNext()
self.postureTask.setReference(self.samplePosture)
############
# CONTACTS #
############
self.contacts = 4*[None] # List to store the rigid contact tasks
for i, name in enumerate(self.foot_frames):
# Contact definition (creating the contact object)
self.contacts[i] = tsid.ContactPoint(name, self.robot, name, contactNormal, mu, fMin, fMax)
self.contacts[i].setKp((kp_contact * matlib.ones(3).T))
self.contacts[i].setKd((2.0 * np.sqrt(kp_contact) * matlib.ones(3).T))
self.contacts[i].useLocalFrame(False)
# Set the contact reference position
H_ref = self.robot.framePosition(self.invdyn.data(), self.ID_feet[i])
H_ref.translation = np.matrix(
[H_ref.translation[0, 0],
H_ref.translation[1, 0],
0.0]).T
self.contacts[i].setReference(H_ref)
# Regularization weight for the force tracking subtask
self.contacts[i].setRegularizationTaskWeightVector(
np.matrix([self.w_reg_f, self.w_reg_f, self.w_reg_f]).T)
# Adding the rigid contact after the reference contact force has been set
self.invdyn.addRigidContact(self.contacts[i], self.w_forceRef)
#######################
# FOOT TRACKING TASKS #
#######################
self.feetTask = 4*[None] # List to store the foot tracking tasks
mask = np.matrix([1.0, 1.0, 1.0, 0.0, 0.0, 0.0]).T
# Task definition (creating the task object)
for i_foot in range(4):
self.feetTask[i_foot] = tsid.TaskSE3Equality(
"foot_track_" + str(i_foot), self.robot, self.foot_frames[i_foot])
self.feetTask[i_foot].setKp(kp_foot * mask)
self.feetTask[i_foot].setKd(2.0 * np.sqrt(kp_foot) * mask)
self.feetTask[i_foot].setMask(mask)
self.feetTask[i_foot].useLocalFrame(False)
# The reference will be set later when the task is enabled
##########
# SOLVER #
##########
# Use EiquadprogFast solver
self.solver = tsid.SolverHQuadProgFast("qp solver")
# Resize the solver to fit the number of variables, equality and inequality constraints
self.solver.resize(self.invdyn.nVar, self.invdyn.nEq, self.invdyn.nIn)
def update_feet_tasks(self, k_loop, gait, looping, interface, ftps_Ids_deb):
"""Update the 3D desired position for feet in swing phase by using a 5-th order polynomial that lead them
to the desired position on the ground (computed by the footstep planner)
Args:
k_loop (int): number of time steps since the start of the current gait cycle
gait (array): gait matrix giving, for each upcoming phase, its duration (in MPC iterations) and the contact state of each foot
looping (int): total number of time steps in one gait cycle
interface (Interface object): interface between the simulator and the MPC/InvDyn
ftps_Ids_deb (list): IDs of debug spheres in PyBullet
"""
# Indexes of feet in swing phase
feet = np.where(gait[0, 1:] == 0)[0]
if len(feet) == 0: # If no foot in swing phase
return 0
t0s = []
for i in feet: # For each foot in swing phase get remaining duration of the swing phase
# Index of the line containing the next stance phase
index = next((idx for idx, val in np.ndenumerate(gait[:, 1+i]) if val == 1), [-1])[0]
remaining_iterations = np.cumsum(gait[:index, 0])[-1] * self.k_mpc - (k_loop % self.k_mpc)
# Compute total duration of current swing phase
i_iter = 1
self.t_swing[i] = gait[0, 0]
while gait[i_iter, 1+i] == 0:
self.t_swing[i] += gait[i_iter, 0]
i_iter += 1
i_iter = -1
while gait[i_iter, 1+i] == 0:
self.t_swing[i] += gait[i_iter, 0]
i_iter -= 1
self.t_swing[i] *= self.dt * self.k_mpc
t0s.append(np.round(self.t_swing[i] - remaining_iterations * self.dt, decimals=3))
# self.footsteps contains the target (x, y) positions for both feet in swing phase
for i in range(len(feet)):
i_foot = feet[i]
# Get desired 3D position, velocity and acceleration
if t0s[i] == 0.000:
[x0, dx0, ddx0, y0, dy0, ddy0, z0, dz0, ddz0, gx1, gy1] = (self.ftgs[i_foot]).get_next_foot(
interface.o_feet[0, i_foot], interface.ov_feet[0, i_foot], interface.oa_feet[0, i_foot],
interface.o_feet[1, i_foot], interface.ov_feet[1, i_foot], interface.oa_feet[1, i_foot],
self.footsteps[0, i_foot], self.footsteps[1, i_foot], t0s[i], | |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '7i97.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_mainWindow(object):
def setupUi(self, mainWindow):
mainWindow.setObjectName("mainWindow")
mainWindow.resize(1000, 851)
mainWindow.setWindowTitle("")
mainWindow.setStyleSheet("")
self.centralwidget = QtWidgets.QWidget(mainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setContentsMargins(8, 8, 8, 8)
self.verticalLayout.setSpacing(5)
self.verticalLayout.setObjectName("verticalLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setFocusPolicy(QtCore.Qt.NoFocus)
self.tabWidget.setStyleSheet("QGroupBox {\n"
" font: bold;\n"
" border: 2px solid silver;\n"
" border-radius: 6px;\n"
" margin-top: 6px;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" top: -5px;\n"
" left: 7px;\n"
" padding: 0px 5px 0px 5px;\n"
"}")
self.tabWidget.setObjectName("tabWidget")
self.machine = QtWidgets.QWidget()
self.machine.setObjectName("machine")
self.gridLayout_49 = QtWidgets.QGridLayout(self.machine)
self.gridLayout_49.setContentsMargins(8, 8, 8, 8)
self.gridLayout_49.setSpacing(5)
self.gridLayout_49.setObjectName("gridLayout_49")
self.gridGroupBox = QtWidgets.QGroupBox(self.machine)
self.gridGroupBox.setStyleSheet("QGroupBox {\n"
" font: bold;\n"
" border: 2px solid silver;\n"
" border-radius: 6px;\n"
" margin-top: 6px;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 7px;\n"
" padding: 0px 5px 0px 5px;\n"
"}")
self.gridGroupBox.setObjectName("gridGroupBox")
self.gridLayout_46 = QtWidgets.QGridLayout(self.gridGroupBox)
self.gridLayout_46.setContentsMargins(9, 15, 9, 9)
self.gridLayout_46.setSpacing(5)
self.gridLayout_46.setObjectName("gridLayout_46")
self.label_2 = QtWidgets.QLabel(self.gridGroupBox)
self.label_2.setObjectName("label_2")
self.gridLayout_46.addWidget(self.label_2, 1, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(self.gridGroupBox)
self.label_3.setObjectName("label_3")
self.gridLayout_46.addWidget(self.label_3, 2, 0, 1, 1)
self.maxLinearVel = QtWidgets.QLineEdit(self.gridGroupBox)
self.maxLinearVel.setObjectName("maxLinearVel")
self.gridLayout_46.addWidget(self.maxLinearVel, 3, 1, 1, 1)
self.label_37 = QtWidgets.QLabel(self.gridGroupBox)
self.label_37.setObjectName("label_37")
self.gridLayout_46.addWidget(self.label_37, 3, 0, 1, 1)
self.label = QtWidgets.QLabel(self.gridGroupBox)
self.label.setObjectName("label")
self.gridLayout_46.addWidget(self.label, 0, 0, 1, 1)
self.label_10 = QtWidgets.QLabel(self.gridGroupBox)
self.label_10.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_10.setObjectName("label_10")
self.gridLayout_46.addWidget(self.label_10, 4, 0, 1, 1)
self.maxVelMinLB = QtWidgets.QLabel(self.gridGroupBox)
self.maxVelMinLB.setMinimumSize(QtCore.QSize(100, 0))
self.maxVelMinLB.setFrameShape(QtWidgets.QFrame.Box)
self.maxVelMinLB.setText("")
self.maxVelMinLB.setObjectName("maxVelMinLB")
self.gridLayout_46.addWidget(self.maxVelMinLB, 3, 2, 1, 1)
self.coordinatesLB = QtWidgets.QLabel(self.gridGroupBox)
self.coordinatesLB.setFrameShape(QtWidgets.QFrame.Box)
self.coordinatesLB.setText("")
self.coordinatesLB.setObjectName("coordinatesLB")
self.gridLayout_46.addWidget(self.coordinatesLB, 4, 1, 1, 2)
self.linearUnitsCB = QtWidgets.QComboBox(self.gridGroupBox)
self.linearUnitsCB.setObjectName("linearUnitsCB")
self.gridLayout_46.addWidget(self.linearUnitsCB, 2, 1, 1, 2)
self.pathLabel = QtWidgets.QLabel(self.gridGroupBox)
self.pathLabel.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.pathLabel.setText("")
self.pathLabel.setObjectName("pathLabel")
self.gridLayout_46.addWidget(self.pathLabel, 1, 1, 1, 2)
self.configName = QtWidgets.QLineEdit(self.gridGroupBox)
self.configName.setObjectName("configName")
self.gridLayout_46.addWidget(self.configName, 0, 1, 1, 2)
self.gridLayout_49.addWidget(self.gridGroupBox, 0, 0, 3, 1)
self.scrollArea = QtWidgets.QScrollArea(self.machine)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 958, 420))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.scrollAreaWidgetContents)
self.horizontalLayout.setContentsMargins(8, 8, 8, 8)
self.horizontalLayout.setSpacing(5)
self.horizontalLayout.setObjectName("horizontalLayout")
self.machinePTE = QtWidgets.QPlainTextEdit(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Courier")
self.machinePTE.setFont(font)
self.machinePTE.setObjectName("machinePTE")
self.horizontalLayout.addWidget(self.machinePTE)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.gridLayout_49.addWidget(self.scrollArea, 4, 0, 1, 3)
self.label_292 = QtWidgets.QLabel(self.machine)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_292.setFont(font)
self.label_292.setObjectName("label_292")
self.gridLayout_49.addWidget(self.label_292, 5, 0, 1, 1)
self.gridGroupBox1 = QtWidgets.QGroupBox(self.machine)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gridGroupBox1.sizePolicy().hasHeightForWidth())
self.gridGroupBox1.setSizePolicy(sizePolicy)
self.gridGroupBox1.setMinimumSize(QtCore.QSize(350, 0))
self.gridGroupBox1.setStyleSheet("QGroupBox {\n"
" font: bold;\n"
" border: 2px solid silver;\n"
" border-radius: 6px;\n"
" margin-top: 6px;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 7px;\n"
" padding: 0px 5px 0px 5px;\n"
"}")
self.gridGroupBox1.setObjectName("gridGroupBox1")
self.gridLayout_26 = QtWidgets.QGridLayout(self.gridGroupBox1)
self.gridLayout_26.setContentsMargins(8, 15, 8, 8)
self.gridLayout_26.setSpacing(5)
self.gridLayout_26.setObjectName("gridLayout_26")
self.ipAddressCB = QtWidgets.QComboBox(self.gridGroupBox1)
self.ipAddressCB.setObjectName("ipAddressCB")
self.gridLayout_26.addWidget(self.ipAddressCB, 0, 1, 1, 1)
self.label_27 = QtWidgets.QLabel(self.gridGroupBox1)
self.label_27.setObjectName("label_27")
self.gridLayout_26.addWidget(self.label_27, 0, 0, 1, 1)
self.gridLayout_49.addWidget(self.gridGroupBox1, 0, 1, 3, 2)
self.gridGroupBox2 = QtWidgets.QGroupBox(self.machine)
self.gridGroupBox2.setStyleSheet("QGroupBox {\n"
" font: bold;\n"
" border: 2px solid silver;\n"
" border-radius: 6px;\n"
" margin-top: 6px;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 7px;\n"
" padding: 0px 5px 0px 5px;\n"
"}")
self.gridGroupBox2.setObjectName("gridGroupBox2")
self.gridLayout_29 = QtWidgets.QGridLayout(self.gridGroupBox2)
self.gridLayout_29.setContentsMargins(8, 15, 8, 8)
self.gridLayout_29.setSpacing(5)
self.gridLayout_29.setObjectName("gridLayout_29")
self.readPB = QtWidgets.QPushButton(self.gridGroupBox2)
self.readPB.setObjectName("readPB")
self.gridLayout_29.addWidget(self.readPB, 1, 2, 1, 1)
self.firmwareCB = QtWidgets.QComboBox(self.gridGroupBox2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.firmwareCB.sizePolicy().hasHeightForWidth())
self.firmwareCB.setSizePolicy(sizePolicy)
self.firmwareCB.setMinimumSize(QtCore.QSize(200, 0))
self.firmwareCB.setObjectName("firmwareCB")
self.gridLayout_29.addWidget(self.firmwareCB, 1, 0, 1, 1)
self.reloadPB = QtWidgets.QPushButton(self.gridGroupBox2)
self.reloadPB.setObjectName("reloadPB")
self.gridLayout_29.addWidget(self.reloadPB, 1, 4, 1, 1)
self.copyPB = QtWidgets.QPushButton(self.gridGroupBox2)
self.copyPB.setObjectName("copyPB")
self.gridLayout_29.addWidget(self.copyPB, 1, 6, 1, 1)
self.firmwarePinsPB = QtWidgets.QPushButton(self.gridGroupBox2)
self.firmwarePinsPB.setObjectName("firmwarePinsPB")
self.gridLayout_29.addWidget(self.firmwarePinsPB, 1, 1, 1, 1)
self.flashPB = QtWidgets.QPushButton(self.gridGroupBox2)
self.flashPB.setObjectName("flashPB")
self.gridLayout_29.addWidget(self.flashPB, 1, 3, 1, 1)
self.verifyPB = QtWidgets.QPushButton(self.gridGroupBox2)
self.verifyPB.setObjectName("verifyPB")
self.gridLayout_29.addWidget(self.verifyPB, 1, 5, 1, 1)
self.gridLayout_49.addWidget(self.gridGroupBox2, 3, 0, 1, 2)
self.groupBox_27 = QtWidgets.QGroupBox(self.machine)
self.groupBox_27.setObjectName("groupBox_27")
self.gridLayout_66 = QtWidgets.QGridLayout(self.groupBox_27)
self.gridLayout_66.setContentsMargins(8, 8, 8, 8)
self.gridLayout_66.setSpacing(5)
self.gridLayout_66.setObjectName("gridLayout_66")
self.backupCB = QtWidgets.QCheckBox(self.groupBox_27)
self.backupCB.setChecked(True)
self.backupCB.setObjectName("backupCB")
self.gridLayout_66.addWidget(self.backupCB, 0, 0, 1, 1)
self.gridLayout_49.addWidget(self.groupBox_27, 3, 2, 1, 1)
self.tabWidget.addTab(self.machine, "")
self.display = QtWidgets.QWidget()
self.display.setObjectName("display")
self.gridGroupBox3 = QtWidgets.QGroupBox(self.display)
self.gridGroupBox3.setGeometry(QtCore.QRect(500, 10, 211, 80))
self.gridGroupBox3.setStyleSheet("QGroupBox {\n"
" font: bold;\n"
" border: 2px solid silver;\n"
" border-radius: 6px;\n"
" margin-top: 6px;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 7px;\n"
" padding: 0px 5px 0px 5px;\n"
"}")
self.gridGroupBox3.setObjectName("gridGroupBox3")
self.gridLayout_32 = QtWidgets.QGridLayout(self.gridGroupBox3)
self.gridLayout_32.setContentsMargins(8, 8, 8, 8)
self.gridLayout_32.setSpacing(5)
self.gridLayout_32.setObjectName("gridLayout_32")
self.frontToolLatheCB = QtWidgets.QCheckBox(self.gridGroupBox3)
self.frontToolLatheCB.setObjectName("frontToolLatheCB")
self.gridLayout_32.addWidget(self.frontToolLatheCB, 0, 0, 1, 1)
self.backToolLatheCB = QtWidgets.QCheckBox(self.gridGroupBox3)
self.backToolLatheCB.setObjectName("backToolLatheCB")
self.gridLayout_32.addWidget(self.backToolLatheCB, 1, 0, 1, 1)
self.gridGroupBox4 = QtWidgets.QGroupBox(self.display)
self.gridGroupBox4.setGeometry(QtCore.QRect(10, 10, 441, 171))
self.gridGroupBox4.setStyleSheet("QGroupBox {\n"
" font: bold;\n"
" border: 2px solid silver;\n"
" border-radius: 6px;\n"
" margin-top: 6px;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" left: 7px;\n"
" padding: 0px 5px 0px 5px;\n"
"}")
self.gridGroupBox4.setObjectName("gridGroupBox4")
self.gridLayout_47 = QtWidgets.QGridLayout(self.gridGroupBox4)
self.gridLayout_47.setContentsMargins(9, 15, 9, 9)
self.gridLayout_47.setSpacing(5)
self.gridLayout_47.setObjectName("gridLayout_47")
self.positionOffsetCB = QtWidgets.QComboBox(self.gridGroupBox4)
self.positionOffsetCB.setObjectName("positionOffsetCB")
self.gridLayout_47.addWidget(self.positionOffsetCB, 1, 1, 1, 1)
self.positionFeedbackCB = QtWidgets.QComboBox(self.gridGroupBox4)
self.positionFeedbackCB.setObjectName("positionFeedbackCB")
self.gridLayout_47.addWidget(self.positionFeedbackCB, 2, 1, 1, 1)
self.maxFeedOverrideSB = QtWidgets.QDoubleSpinBox(self.gridGroupBox4)
self.maxFeedOverrideSB.setDecimals(1)
self.maxFeedOverrideSB.setMaximum(2.0)
self.maxFeedOverrideSB.setSingleStep(0.1)
self.maxFeedOverrideSB.setProperty("value", 1.2)
self.maxFeedOverrideSB.setObjectName("maxFeedOverrideSB")
self.gridLayout_47.addWidget(self.maxFeedOverrideSB, 3, 1, 1, 1)
self.label_40 = QtWidgets.QLabel(self.gridGroupBox4)
self.label_40.setObjectName("label_40")
self.gridLayout_47.addWidget(self.label_40, 0, 0, 1, 1)
self.label_41 = QtWidgets.QLabel(self.gridGroupBox4)
self.label_41.setObjectName("label_41")
self.gridLayout_47.addWidget(self.label_41, 1, 0, 1, 1)
self.guiCB = QtWidgets.QComboBox(self.gridGroupBox4)
self.guiCB.setObjectName("guiCB")
self.gridLayout_47.addWidget(self.guiCB, 0, 1, 1, 1)
self.label_42 = QtWidgets.QLabel(self.gridGroupBox4)
self.label_42.setObjectName("label_42")
self.gridLayout_47.addWidget(self.label_42, 2, 0, 1, 1)
self.label_9 = QtWidgets.QLabel(self.gridGroupBox4)
self.label_9.setObjectName("label_9")
self.gridLayout_47.addWidget(self.label_9, 3, 0, 1, 1)
self.label_179 = QtWidgets.QLabel(self.gridGroupBox4)
self.label_179.setObjectName("label_179")
self.gridLayout_47.addWidget(self.label_179, 3, 2, 1, 1)
self.groupBox_60 = QtWidgets.QGroupBox(self.display)
self.groupBox_60.setGeometry(QtCore.QRect(10, 200, 270, 60))
self.groupBox_60.setObjectName("groupBox_60")
self.gridLayout_45 = QtWidgets.QGridLayout(self.groupBox_60)
self.gridLayout_45.setContentsMargins(8, 8, 8, 8)
self.gridLayout_45.setSpacing(5)
self.gridLayout_45.setObjectName("gridLayout_45")
self.editorCB = QtWidgets.QComboBox(self.groupBox_60)
self.editorCB.setObjectName("editorCB")
self.gridLayout_45.addWidget(self.editorCB, 0, 0, 1, 1)
self.groupBox_61 = QtWidgets.QGroupBox(self.display)
self.groupBox_61.setGeometry(QtCore.QRect(10, 270, 441, 60))
self.groupBox_61.setObjectName("groupBox_61")
self.gridLayout_107 = QtWidgets.QGridLayout(self.groupBox_61)
self.gridLayout_107.setContentsMargins(8, 8, 8, 8)
self.gridLayout_107.setSpacing(5)
self.gridLayout_107.setObjectName("gridLayout_107")
self.label_21 = QtWidgets.QLabel(self.groupBox_61)
self.label_21.setObjectName("label_21")
self.gridLayout_107.addWidget(self.label_21, 0, 0, 1, 1)
self.defaultJogSpeedDSB = QtWidgets.QDoubleSpinBox(self.groupBox_61)
self.defaultJogSpeedDSB.setDecimals(1)
self.defaultJogSpeedDSB.setSingleStep(0.1)
self.defaultJogSpeedDSB.setProperty("value", 0.5)
self.defaultJogSpeedDSB.setObjectName("defaultJogSpeedDSB")
self.gridLayout_107.addWidget(self.defaultJogSpeedDSB, 0, 1, 1, 1)
self.jogSpeedLB = QtWidgets.QLabel(self.groupBox_61)
self.jogSpeedLB.setFrameShape(QtWidgets.QFrame.Box)
self.jogSpeedLB.setText("")
self.jogSpeedLB.setObjectName("jogSpeedLB")
self.gridLayout_107.addWidget(self.jogSpeedLB, 0, 2, 1, 1)
self.tabWidget.addTab(self.display, "")
self.axisTab = QtWidgets.QWidget()
self.axisTab.setEnabled(True)
self.axisTab.setObjectName("axisTab")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.axisTab)
self.verticalLayout_2.setContentsMargins(8, 8, 8, 8)
self.verticalLayout_2.setSpacing(5)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.jointType_3 = QtWidgets.QTabWidget(self.axisTab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.jointType_3.sizePolicy().hasHeightForWidth())
self.jointType_3.setSizePolicy(sizePolicy)
self.jointType_3.setToolTip("")
self.jointType_3.setTabPosition(QtWidgets.QTabWidget.North)
self.jointType_3.setObjectName("jointType_3")
self.joint0tab = QtWidgets.QWidget()
self.joint0tab.setObjectName("joint0tab")
self.gridLayout_2 = QtWidgets.QGridLayout(self.joint0tab)
self.gridLayout_2.setContentsMargins(8, 8, 8, 8)
self.gridLayout_2.setSpacing(5)
self.gridLayout_2.setObjectName("gridLayout_2")
self.groupBox_2 = QtWidgets.QGroupBox(self.joint0tab)
self.groupBox_2.setStyleSheet("")
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout.setContentsMargins(10, 10, 10, 10)
self.gridLayout.setSpacing(5)
self.gridLayout.setObjectName("gridLayout")
self.label_215 = QtWidgets.QLabel(self.groupBox_2)
self.label_215.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_215.setObjectName("label_215")
self.gridLayout.addWidget(self.label_215, 0, 0, 1, 1)
self.p_0 = QtWidgets.QLineEdit(self.groupBox_2)
self.p_0.setText("")
self.p_0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.p_0.setObjectName("p_0")
self.gridLayout.addWidget(self.p_0, 0, 1, 1, 1)
self.label_214 = QtWidgets.QLabel(self.groupBox_2)
self.label_214.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_214.setObjectName("label_214")
self.gridLayout.addWidget(self.label_214, 0, 2, 1, 1)
self.deadband_0 = QtWidgets.QLineEdit(self.groupBox_2)
self.deadband_0.setText("")
self.deadband_0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.deadband_0.setObjectName("deadband_0")
self.gridLayout.addWidget(self.deadband_0, 0, 3, 1, 1)
self.label_216 = QtWidgets.QLabel(self.groupBox_2)
self.label_216.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_216.setObjectName("label_216")
self.gridLayout.addWidget(self.label_216, 1, 0, 1, 1)
self.i_0 = QtWidgets.QLineEdit(self.groupBox_2)
self.i_0.setText("")
self.i_0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.i_0.setObjectName("i_0")
self.gridLayout.addWidget(self.i_0, 1, 1, 1, 1)
self.label_221 = QtWidgets.QLabel(self.groupBox_2)
self.label_221.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_221.setObjectName("label_221")
self.gridLayout.addWidget(self.label_221, 1, 2, 1, 1)
self.bias_0 = QtWidgets.QLineEdit(self.groupBox_2)
self.bias_0.setText("")
self.bias_0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.bias_0.setObjectName("bias_0")
self.gridLayout.addWidget(self.bias_0, 1, 3, 1, 1)
self.label_217 = QtWidgets.QLabel(self.groupBox_2)
self.label_217.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_217.setObjectName("label_217")
self.gridLayout.addWidget(self.label_217, 2, 0, 1, 1)
self.d_0 = QtWidgets.QLineEdit(self.groupBox_2)
self.d_0.setText("")
self.d_0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.d_0.setObjectName("d_0")
self.gridLayout.addWidget(self.d_0, 2, 1, 1, 1)
self.label_222 = QtWidgets.QLabel(self.groupBox_2)
self.label_222.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_222.setObjectName("label_222")
self.gridLayout.addWidget(self.label_222, 2, 2, 1, 1)
self.maxOutput_0 = QtWidgets.QLineEdit(self.groupBox_2)
self.maxOutput_0.setText("")
self.maxOutput_0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.maxOutput_0.setObjectName("maxOutput_0")
self.gridLayout.addWidget(self.maxOutput_0, 2, 3, 1, 1)
self.label_218 = QtWidgets.QLabel(self.groupBox_2)
self.label_218.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_218.setObjectName("label_218")
self.gridLayout.addWidget(self.label_218, 3, 0, 1, 1)
self.ff0_0 = QtWidgets.QLineEdit(self.groupBox_2)
self.ff0_0.setText("")
self.ff0_0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ff0_0.setObjectName("ff0_0")
self.gridLayout.addWidget(self.ff0_0, 3, 1, 1, 1)
self.label_219 = QtWidgets.QLabel(self.groupBox_2)
self.label_219.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_219.setObjectName("label_219")
self.gridLayout.addWidget(self.label_219, 4, 0, 1, 1)
self.ff1_0 = QtWidgets.QLineEdit(self.groupBox_2)
self.ff1_0.setText("")
self.ff1_0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ff1_0.setObjectName("ff1_0")
self.gridLayout.addWidget(self.ff1_0, 4, 1, 1, 1)
self.label_220 = QtWidgets.QLabel(self.groupBox_2)
self.label_220.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_220.setObjectName("label_220")
self.gridLayout.addWidget(self.label_220, 5, 0, 1, 1)
self.ff2_0 = QtWidgets.QLineEdit(self.groupBox_2)
self.ff2_0.setText("")
self.ff2_0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ff2_0.setObjectName("ff2_0")
self.gridLayout.addWidget(self.ff2_0, 5, 1, 1, 1)
self.pidDefault_0 = QtWidgets.QPushButton(self.groupBox_2)
self.pidDefault_0.setObjectName("pidDefault_0")
self.gridLayout.addWidget(self.pidDefault_0, 5, 3, 1, 1)
self.label_183 = QtWidgets.QLabel(self.groupBox_2)
self.label_183.setObjectName("label_183")
self.gridLayout.addWidget(self.label_183, 3, 2, 1, 1)
self.maxError_0 = QtWidgets.QLineEdit(self.groupBox_2)
self.maxError_0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.maxError_0.setObjectName("maxError_0")
self.gridLayout.addWidget(self.maxError_0, 3, 3, 1, 1)
self.gridLayout_2.addWidget(self.groupBox_2, 1, 0, 1, 1)
self.jointAxisGroup_0 = QtWidgets.QGroupBox(self.joint0tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.jointAxisGroup_0.sizePolicy().hasHeightForWidth())
self.jointAxisGroup_0.setSizePolicy(sizePolicy)
self.jointAxisGroup_0.setStyleSheet("")
self.jointAxisGroup_0.setObjectName("jointAxisGroup_0")
self.gridLayout_4 = QtWidgets.QGridLayout(self.jointAxisGroup_0)
self.gridLayout_4.setContentsMargins(10, 10, 10, 10)
self.gridLayout_4.setSpacing(5)
self.gridLayout_4.setObjectName("gridLayout_4")
self.minLimit_0 = QtWidgets.QLineEdit(self.jointAxisGroup_0)
self.minLimit_0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.minLimit_0.setObjectName("minLimit_0")
self.gridLayout_4.addWidget(self.minLimit_0, 2, 3, 1, 1)
self.maxLimit_0 = QtWidgets.QLineEdit(self.jointAxisGroup_0)
self.maxLimit_0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.maxLimit_0.setObjectName("maxLimit_0")
self.gridLayout_4.addWidget(self.maxLimit_0, 2, 4, 1, 1)
self.maxVelocity_0 = QtWidgets.QLineEdit(self.jointAxisGroup_0)
self.maxVelocity_0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.maxVelocity_0.setObjectName("maxVelocity_0")
self.gridLayout_4.addWidget(self.maxVelocity_0, 2, 5, 1, 1)
self.axisCB_0 = QtWidgets.QComboBox(self.jointAxisGroup_0)
self.axisCB_0.setObjectName("axisCB_0")
self.gridLayout_4.addWidget(self.axisCB_0, 2, 0, 1, 1)
self.maxAccel_0 = QtWidgets.QLineEdit(self.jointAxisGroup_0)
self.maxAccel_0.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.maxAccel_0.setObjectName("maxAccel_0")
self.gridLayout_4.addWidget(self.maxAccel_0, 2, 6, 1, 1)
self.reverse_0 = QtWidgets.QCheckBox(self.jointAxisGroup_0)
self.reverse_0.setObjectName("reverse_0")
self.gridLayout_4.addWidget(self.reverse_0, 2, 7, 1, 1)
self.scale_0 = QtWidgets.QLineEdit(self.jointAxisGroup_0)
self.scale_0.setObjectName("scale_0")
self.gridLayout_4.addWidget(self.scale_0, 2, 2, 1, 1)
self.axisType_0 = QtWidgets.QLabel(self.jointAxisGroup_0)
self.axisType_0.setFrameShape(QtWidgets.QFrame.Box)
self.axisType_0.setText("")
self.axisType_0.setObjectName("axisType_0")
self.gridLayout_4.addWidget(self.axisType_0, 2, 1, 1, 1)
self.label_45 = QtWidgets.QLabel(self.jointAxisGroup_0)
self.label_45.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_45.setObjectName("label_45")
self.gridLayout_4.addWidget(self.label_45, 0, 2, 1, 1)
self.label_43 = QtWidgets.QLabel(self.jointAxisGroup_0)
self.label_43.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_43.setObjectName("label_43")
self.gridLayout_4.addWidget(self.label_43, 0, 0, 1, 1)
self.label_4 = QtWidgets.QLabel(self.jointAxisGroup_0)
self.label_4.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)
self.label_4.setObjectName("label_4")
self.gridLayout_4.addWidget(self.label_4, 0, 1, 1, 1)
self.label_62 = QtWidgets.QLabel(self.jointAxisGroup_0)
self.label_62.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_62.setWordWrap(True)
self.label_62.setObjectName("label_62")
self.gridLayout_4.addWidget(self.label_62, 0, 3, 1, 1)
self.label_67 = QtWidgets.QLabel(self.jointAxisGroup_0)
self.label_67.setTextFormat(QtCore.Qt.AutoText)
self.label_67.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_67.setWordWrap(True)
self.label_67.setObjectName("label_67")
self.gridLayout_4.addWidget(self.label_67, 0, 4, 1, 1)
self.label_72 = QtWidgets.QLabel(self.jointAxisGroup_0)
self.label_72.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_72.setWordWrap(True)
self.label_72.setObjectName("label_72")
self.gridLayout_4.addWidget(self.label_72, 0, 5, 1, 1)
self.label_73 = QtWidgets.QLabel(self.jointAxisGroup_0)
self.label_73.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_73.setWordWrap(True)
self.label_73.setObjectName("label_73")
self.gridLayout_4.addWidget(self.label_73, 0, 6, 1, 1)
self.gridLayout_2.addWidget(self.jointAxisGroup_0, 0, 0, 1, 2)
self.groupBox_3 = QtWidgets.QGroupBox(self.joint0tab)
self.groupBox_3.setStyleSheet("")
self.groupBox_3.setObjectName("groupBox_3")
self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox_3)
self.gridLayout_3.setContentsMargins(10, 10, 10, 10)
self.gridLayout_3.setSpacing(5)
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_83 = QtWidgets.QLabel(self.groupBox_3)
self.label_83.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_83.setObjectName("label_83")
self.gridLayout_3.addWidget(self.label_83, 1, 0, 1, 1)
self.label_38 = QtWidgets.QLabel(self.groupBox_3)
self.label_38.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
| |
# e.g. for a constant window like [1, 1, 1, 1, 1] the zScore must be 0.0
result[i] = 0.0
else:
result[i] = (arr[i] - _avg) / _sd
return result
def _min(arr1, arr2):
bc = len(arr1)
result = np.full(bc, nan)
for i in range(bc):
if isfinite(arr1[i]) and isfinite(arr2[i]):
if arr1[i] < arr2[i]:
result[i] = arr1[i]
else:
result[i] = arr2[i]
return result
def _max(arr1, arr2):
bc = len(arr1)
result = np.full(bc, nan)
for i in range(bc):
if isfinite(arr1[i]) and isfinite(arr2[i]):
if arr1[i] > arr2[i]:
result[i] = arr1[i]
else:
result[i] = arr2[i]
return result
def _abs(arr):
bc = len(arr)
result = np.full(bc, nan)
for i in range(bc):
if isfinite(arr[i]):
result[i] = abs(arr[i])
return result
def _value_when(arr, cond):
bc = len(cond)
result = np.full(bc, nan)
cond_value = nan
for i in range(bc):
if not isfinite(cond[i]):
continue
if cond[i] == 1.0 or cond[i] is True:
if isfinite(arr[i]):
cond_value = arr[i]
else:
cond_value = nan
elif not(cond[i] == 0.0 or cond[i] is False or isnan(cond[i])):
raise ValueError("Only True / False / 1.0 / 0.0 and NaN values are allowed for 'cond' array")
if isfinite(arr[i]):
result[i] = cond_value
return result
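# Illustrative call (hypothetical inputs): the result holds the last value of 'arr' sampled where 'cond' was truthy,
#   _value_when(np.array([1., 2., 3., 4.]), np.array([0., 1., 0., 1.]))  -> [nan, 2., 2., 4.]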
def _nz(arr, fill_by):
bc = len(arr)
result = np.full(bc, fill_by)
for i in range(bc):
if isfinite(arr[i]):
result[i] = arr[i]
return result
def _roc(arr, period):
if period <= 0:
raise ValueError("{} must be positive number".format(period))
bc = len(arr)
result = np.full(bc, nan)
for i in range(period, bc):
if isfinite(arr[i]) and isfinite(arr[i-period]):
if arr[i] <= 0.0 or arr[i - period] <= 0.0:
raise ValueError("% rate-of-change is only applicable to positive time series, got values less or equal to zero!")
result[i] = (arr[i]/arr[i-period] - 1.0)
return result
def _roc_log(arr, period):
if period <= 0:
raise ValueError("{} must be positive number".format(period))
bc = len(arr)
result = np.full(bc, nan)
for i in range(period, bc):
if isfinite(arr[i]) and isfinite(arr[i-period]):
if arr[i] <= 0.0 or arr[i - period] <= 0.0:
raise ValueError("% rate-of-change is only applicable to positive time series, got values less or equal to zero!")
result[i] = log(arr[i]/arr[i-period])
return result
def _diff(arr, period):
if period <= 0:
raise ValueError("{} must be positive number".format(period))
bc = len(arr)
result = np.full(bc, nan)
for i in range(period, bc):
if isfinite(arr[i]) and isfinite(arr[i-period]):
result[i] = (arr[i] - arr[i-period])
return result
def _rsi(arr, period):
if period <= 0:
raise ValueError('Period must be positive')
bc = len(arr)
result = np.full(bc, nan)
sumup = 0
sumdn = 0
upcnt = 0
dncnt = 0
for i in range(1, bc):
diff = arr[i] - arr[i - 1]
if i > period:
# Remove old_diff from the window first (even if 'diff' is NaN, to avoid skipping the removal)
old_diff = arr[i - period] - arr[i - period - 1]
if isfinite(old_diff):
if old_diff > 0:
sumup -= old_diff
upcnt -= 1
elif old_diff < 0:
sumdn += old_diff
dncnt -= 1
if not isfinite(diff):
continue
if diff > 0:
sumup += diff
upcnt += 1
elif diff < 0:
sumdn -= diff
dncnt += 1
if i >= period:
if upcnt + dncnt > 0:
avgup = 0.0 if upcnt == 0 else sumup / upcnt
avgdn = 0.0 if dncnt == 0 else sumdn / dncnt
rsi = 100.0 * avgup / (avgup + avgdn)
assert rsi < 101
assert rsi >= 0
result[i] = rsi
else:
result[i] = 50.0
return result
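# Minimal usage sketch (hypothetical prices): a simple-average RSI over the last 'period' price changes;
# warm-up bars stay NaN, e.g. _rsi(np.array([1., 2., 3., 2., 2.5]), 3) only fills result[3:].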
def _rangehilo(o, h, l, c, period):
if period <= 0:
raise ValueError("Period must be positive")
bc = len(c)
result = np.full(bc, nan)
cur_hhv = nan
icur_hhv = -1
cur_llv = nan
icur_llv = -1
for i in range(bc):
if isfinite(h[i]):
if h[i] > cur_hhv or not isfinite(cur_hhv):
cur_hhv = h[i]
icur_hhv = i
else:
if i - icur_hhv >= period:
cur_hhv = nan
icur_hhv = i - period + 1
for k in range(i - period + 1, i + 1):
if not isfinite(h[k]):
continue
if k == i - period + 1 or h[k] > cur_hhv or not isfinite(cur_hhv):
cur_hhv = h[k]
icur_hhv = k
if isfinite(l[i]):
if l[i] < cur_llv or not isfinite(cur_llv):
cur_llv = l[i]
icur_llv = i
else:
if i - icur_llv >= period:
cur_llv = nan
icur_llv = i - period + 1
for k in range(i - period + 1, i + 1):
if not isfinite(l[k]):
continue
if k == i - period + 1 or l[k] < cur_llv or not isfinite(cur_llv):
cur_llv = l[k]
icur_llv = k
if i >= period - 1:
_o = o[i-period+1]
_h = cur_hhv
_l = cur_llv
_c = c[i]
if not (isfinite(_o) and isfinite(_c) and isfinite(_h) and isfinite(_l) and isfinite(h[i]) and isfinite(l[i])):
continue
# Do sanity checks
if h[i] < l[i]:
raise ValueError("Input data error: H < L")
if c[i] > h[i] or c[i] < l[i]:
raise ValueError("Input data error: C < L or C > H")
if o[i] > h[i] or o[i] < l[i]:
raise ValueError("Input data error: O < L or O > H")
# Calculate RangeHiLo
# RangeHilo - is measure of Doji'ness of the candle(period=1) or range
# 1.0 - means that a candle is exact Doji
# 0.0 - means that a candle is trending from Open price to Close (where open is min/max and close is min/max price of candle)
if _h - _l == 0.0:
# The range candle is like '-' -> it's closer to a Doji than to a trend candle
# 2018-02-17 But 1.0 is another extreme case, so the algorithm's result must be ambiguous, i.e. = 0.5
result[i] = 0.5
else:
result[i] = ((_h - max(_o, _c)) + (min(_o, _c) - _l)) / (_h - _l)
return result
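# Illustrative call (hypothetical OHLC data, period=1): a candle trending from open to close scores lower
# than a Doji-like candle whose open equals its close, e.g.
#   _rangehilo(np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([0.5, 0.5]), np.array([1.9, 1.0]), 1)
#   -> [0.4, 1.0]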
def _rangeclose(h, l, c, period):
if period <= 0:
raise ValueError("Period must be positive")
bc = len(c)
result = np.full(bc, nan)
cur_hhv = nan
icur_hhv = -1
cur_llv = nan
icur_llv = -1
for i in range(bc):
if isfinite(h[i]):
if h[i] > cur_hhv or not isfinite(cur_hhv):
cur_hhv = h[i]
icur_hhv = i
else:
if i - icur_hhv >= period:
cur_hhv = nan
icur_hhv = i - period + 1
for k in range(i - period + 1, i + 1):
if not isfinite(h[k]):
continue
if k == i - period + 1 or h[k] > cur_hhv or not isfinite(cur_hhv):
cur_hhv = h[k]
icur_hhv = k
if isfinite(l[i]):
if l[i] < cur_llv or not isfinite(cur_llv):
cur_llv = l[i]
icur_llv = i
else:
if i - icur_llv >= period:
cur_llv = nan
icur_llv = i - period + 1
for k in range(i - period + 1, i + 1):
if not isfinite(l[k]):
continue
if k == i - period + 1 or l[k] < cur_llv or not isfinite(cur_llv):
cur_llv = l[k]
icur_llv = k
if i >= period - 1:
_h = cur_hhv
_l = cur_llv
_c = c[i]
if not (isfinite(_c) and isfinite(_h) and isfinite(_l) and isfinite(h[i]) and isfinite(l[i])):
continue
# Do sanity checks
if h[i] < l[i]:
raise ValueError("Input data error: H < L")
if c[i] > h[i] or c[i] < l[i]:
raise ValueError("Input data error: C < L or C > H")
# Calculate RangeClose
# val[i] = (pC[i] - cur_llv) / (cur_hhv - cur_llv);
if _h - _l == 0.0:
# The range candle is like '-' -> keep neutral indicator value
result[i] = 0.5
else:
result[i] = (_c - _l) / (_h - _l)
return result
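# Similar sketch for _rangeclose (hypothetical data): position of the close inside the period's high-low range,
#   _rangeclose(np.array([2.0]), np.array([0.5]), np.array([1.9]), 1)  -> [~0.933]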
def _wma(arr, weight, period):
if period <= 0:
raise ValueError('Period must be positive')
bc = len(arr)
result = np.full(bc, nan)
_sum_total = 0.0
_valid_w = 0.0
for i in range(bc):
a = arr[i]
w = weight[i]
if i < period:
if isfinite(a) and isfinite(w):
if w < 0:
raise ValueError('Negative weight is not allowed')
_sum_total += a * w
_valid_w += w
if i == period - 1 and _valid_w > 0:
result[i] = _sum_total / _valid_w
else:
a_prev = arr[i - period]
w_prev = weight[i - period]
if isfinite(a_prev) and isfinite(w_prev):
_sum_total -= a_prev * w_prev
_valid_w -= w_prev
if isfinite(a) and isfinite(w):
if w < 0:
| |
"""
This module provides an `ECTester <https://github.com/crocs-muni/ECTester/>`_ target class.
"""
from abc import ABC
from binascii import hexlify
from enum import IntEnum, IntFlag
from functools import reduce
from math import ceil, log
from operator import or_
from typing import Optional, Mapping, List, Union
from public import public
from smartcard.CardConnection import CardConnection
from smartcard.Exceptions import CardConnectionException
from .ISO7816 import CommandAPDU, ResponseAPDU, ISO7816
from .PCSC import PCSCTarget
from ...ec.model import ShortWeierstrassModel
from ...ec.params import DomainParameters
from ...ec.point import Point
class ShiftableFlag(IntFlag): # pragma: no cover
def __lshift__(self, other):
val = int(self) << other
for e in self.__class__:
if val == e.value:
return e
raise ValueError
def __rshift__(self, other):
val = int(self) >> other
for e in self.__class__:
if val == e.value:
return e
raise ValueError
def __iter__(self):
val = int(self)
for e in self.__class__:
i = int(e)
if i & val == i:
while i % 2 == 0 and i != 0:
i //= 2
if i == 1:
yield e
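# Note (based on the definitions below): shifting stays within defined members and iteration yields the
# set single-bit flags, e.g. KeypairEnum.KEYPAIR_LOCAL << 1 is KeypairEnum.KEYPAIR_REMOTE and
# list(KeypairEnum.KEYPAIR_BOTH) == [KeypairEnum.KEYPAIR_LOCAL, KeypairEnum.KEYPAIR_REMOTE].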
@public
class KeypairEnum(ShiftableFlag): # pragma: no cover
"""ECTester's KeyPair type."""
KEYPAIR_LOCAL = 0x01
KEYPAIR_REMOTE = 0x02
KEYPAIR_BOTH = KEYPAIR_LOCAL | KEYPAIR_REMOTE
@public
class InstructionEnum(IntEnum): # pragma: no cover
"""ECTester's instruction (INS)."""
INS_ALLOCATE = 0x5a
INS_CLEAR = 0x5b
INS_SET = 0x5c
INS_TRANSFORM = 0x5d
INS_GENERATE = 0x5e
INS_EXPORT = 0x5f
INS_ECDH = 0x70
INS_ECDH_DIRECT = 0x71
INS_ECDSA = 0x72
INS_ECDSA_SIGN = 0x73
INS_ECDSA_VERIFY = 0x74
INS_CLEANUP = 0x75
INS_ALLOCATE_KA = 0x76
INS_ALLOCATE_SIG = 0x77
INS_GET_INFO = 0x78
INS_SET_DRY_RUN_MODE = 0x79
INS_BUFFER = 0x7a
INS_PERFORM = 0x7b
@public
class KeyBuildEnum(IntEnum): # pragma: no cover
"""ECTester's key builder type."""
BUILD_KEYPAIR = 0x01
BUILD_KEYBUILDER = 0x02
@public
class ExportEnum(IntEnum): # pragma: no cover
"""ECTester's export boolean."""
EXPORT_TRUE = 0xff
EXPORT_FALSE = 0x00
@classmethod
def from_bool(cls, val: bool):
return cls.EXPORT_TRUE if val else cls.EXPORT_FALSE
@public
class RunModeEnum(IntEnum): # pragma: no cover
"""ECTester's run mode."""
MODE_NORMAL = 0xaa
MODE_DRY_RUN = 0xbb
@public
class KeyEnum(ShiftableFlag): # pragma: no cover
"""ECTester's key enum."""
PUBLIC = 0x01
PRIVATE = 0x02
BOTH = PRIVATE | PUBLIC
@public
class AppletBaseEnum(IntEnum): # pragma: no cover
"""ECTester's JavaCard applet base version."""
BASE_221 = 0x0221
BASE_222 = 0x0222
@public
class KeyClassEnum(IntEnum): # pragma: no cover
"""JavaCard EC-based key class."""
ALG_EC_F2M = 4
ALG_EC_FP = 5
@public
class KeyAgreementEnum(IntEnum): # pragma: no cover
"""JavaCard `KeyAgreement` type values."""
ALG_EC_SVDP_DH = 1
ALG_EC_SVDP_DH_KDF = 1
ALG_EC_SVDP_DHC = 2
ALG_EC_SVDP_DHC_KDF = 2
ALG_EC_SVDP_DH_PLAIN = 3
ALG_EC_SVDP_DHC_PLAIN = 4
ALG_EC_PACE_GM = 5
ALG_EC_SVDP_DH_PLAIN_XY = 6
@public
class SignatureEnum(IntEnum): # pragma: no cover
"""JavaCard `Signature` type values."""
ALG_ECDSA_SHA = 17
ALG_ECDSA_SHA_224 = 37
ALG_ECDSA_SHA_256 = 33
ALG_ECDSA_SHA_384 = 34
ALG_ECDSA_SHA_512 = 38
@public
class TransformationEnum(ShiftableFlag): # pragma: no cover
"""ECTester's point/value transformation types."""
NONE = 0x00
FIXED = 0x01
FULLRANDOM = 0x02
ONEBYTERANDOM = 0x04
ZERO = 0x08
ONE = 0x10
MAX = 0x20
INCREMENT = 0x40
INFINITY = 0x80
COMPRESS = 0x0100
COMPRESS_HYBRID = 0x0200
MASK_04 = 0x0400
@public
class FormatEnum(IntEnum): # pragma: no cover
"""ECTester's point format types."""
UNCOMPRESSED = 0
COMPRESSED = 1
HYBRID = 2
@public
class CurveEnum(IntEnum): # pragma: no cover
"""ECTester's curve constants."""
default = 0x00
external = 0xff
secp112r1 = 0x01
secp128r1 = 0x02
secp160r1 = 0x03
secp192r1 = 0x04
secp224r1 = 0x05
secp256r1 = 0x06
secp384r1 = 0x07
secp521r1 = 0x08
sect163r1 = 0x09
sect233r1 = 0x0a
sect283r1 = 0x0b
sect409r1 = 0x0c
sect571r1 = 0x0d
@public
class ParameterEnum(ShiftableFlag): # pragma: no cover
"""ECTester's parameter ids."""
NONE = 0x00
FP = 0x01
F2M = 0x02
A = 0x04
B = 0x08
G = 0x10
R = 0x20
K = 0x40
W = 0x80
S = 0x0100
DOMAIN_FP = FP | A | B | G | R | K
DOMAIN_F2M = F2M | A | B | G | R | K
KEYPAIR = W | S
ALL = FP | F2M | A | B | G | R | K | W | S
@public
class ChunkingException(Exception): # pragma: no cover
"""An exception that is raised if an error happened during the chunking process of a large APDU."""
pass
class Response(ABC): # pragma: no cover
"""An abstract base class of a response APDU."""
resp: ResponseAPDU
sws: List[int]
params: List[bytes]
success: bool = True
error: bool = False
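# Assumed response body layout (as parsed by __init__ below): 'num_sw' status words of 2 bytes each
# (big-endian), followed by 'num_params' length-prefixed parameters (2-byte big-endian length, then data).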
def __init__(self, resp: ResponseAPDU, num_sw: int, num_params: int):
self.resp = resp
self.sws = [0 for _ in range(num_sw)]
self.params = [bytes() for _ in range(num_params)]
offset = 0
for i in range(num_sw):
if len(resp.data) >= offset + 2:
self.sws[i] = int.from_bytes(resp.data[offset:offset + 2], "big")
offset += 2
if self.sws[i] != ISO7816.SW_NO_ERROR:
self.success = False
else:
self.success = False
self.error = True
if self.resp.sw != ISO7816.SW_NO_ERROR:
self.success = False
self.error = False
for i in range(num_params):
if len(resp.data) < offset + 2:
self.success = False
self.error = True
break
param_len = int.from_bytes(resp.data[offset:offset + 2], "big")
offset += 2
if len(resp.data) < offset + param_len:
self.success = False
self.error = True
break
self.params[i] = resp.data[offset:offset + param_len]
offset += param_len
def __repr__(self):
return f"{self.__class__.__name__}(sws=[{', '.join(list(map(hex, self.sws)))}], sw={hex(self.resp.sw)}, success={self.success}, error={self.error})"
@public
class AllocateKaResponse(Response): # pragma: no cover
"""A response to the KeyAgreement allocation command."""
def __init__(self, resp: ResponseAPDU):
super().__init__(resp, 1, 0)
@public
class AllocateSigResponse(Response): # pragma: no cover
"""A response to the Signature allocation command."""
def __init__(self, resp: ResponseAPDU):
super().__init__(resp, 1, 0)
@public
class AllocateResponse(Response): # pragma: no cover
"""A response to the KeyPair allocation command."""
def __init__(self, resp: ResponseAPDU, keypair: KeypairEnum):
super().__init__(resp, 2 if keypair == KeypairEnum.KEYPAIR_BOTH else 1, 0)
@public
class ClearResponse(Response): # pragma: no cover
"""A response to the Clear key command."""
def __init__(self, resp: ResponseAPDU, keypair: KeypairEnum):
super().__init__(resp, 2 if keypair == KeypairEnum.KEYPAIR_BOTH else 1, 0)
@public
class SetResponse(Response): # pragma: no cover
"""A response to the Set command."""
def __init__(self, resp: ResponseAPDU, keypair: KeypairEnum):
super().__init__(resp, 2 if keypair == KeypairEnum.KEYPAIR_BOTH else 1, 0)
@public
class TransformResponse(Response): # pragma: no cover
"""A response to the Transform command."""
def __init__(self, resp: ResponseAPDU, keypair: KeypairEnum):
super().__init__(resp, 2 if keypair == KeypairEnum.KEYPAIR_BOTH else 1, 0)
@public
class GenerateResponse(Response): # pragma: no cover
"""A response to the Generate command."""
def __init__(self, resp: ResponseAPDU, keypair: KeypairEnum):
super().__init__(resp, 2 if keypair == KeypairEnum.KEYPAIR_BOTH else 1, 0)
@public
class ExportResponse(Response): # pragma: no cover
"""A response to the Export command, contains the exported parameters/values."""
keypair: KeypairEnum
key: KeyEnum
parameters: ParameterEnum
def __init__(self, resp: ResponseAPDU, keypair: KeypairEnum, key: KeyEnum,
params: ParameterEnum):
self.keypair = keypair
self.key = key
self.parameters = params
exported = 2 if keypair == KeypairEnum.KEYPAIR_BOTH else 1
keys = 2 if key == KeyEnum.BOTH else 1
param_count = 0
param = ParameterEnum.FP
while True:
if param & params:
param_count += 1
if param == ParameterEnum.K:
break
param <<= 1
other = 0
other += 1 if key & KeyEnum.PUBLIC and params & ParameterEnum.W else 0
other += 1 if key & KeyEnum.PRIVATE and params & ParameterEnum.S else 0
super().__init__(resp, exported, exported * keys * param_count + exported * other)
def get_index(self, keypair: KeypairEnum, param: ParameterEnum) -> Optional[int]:
pair = KeypairEnum.KEYPAIR_LOCAL
index = 0
while True:
mask = ParameterEnum.FP
while True:
if pair == keypair and param == mask:
return index
if self.parameters & mask and self.keypair & pair:
if mask == ParameterEnum.W:
if self.key & KeyEnum.PUBLIC:
index += 1
elif mask == ParameterEnum.S:
if self.key & KeyEnum.PRIVATE:
index += 1
else:
index += 1
if mask == ParameterEnum.S:
break
mask <<= 1
if pair == KeypairEnum.KEYPAIR_REMOTE:
break
pair <<= 1
return None
def get_param(self, keypair: KeypairEnum, param: ParameterEnum) -> Optional[bytes]:
index = self.get_index(keypair, param)
if index is not None:
return self.params[index]
return None
def __repr__(self):
return f"{self.__class__.__name__}(sws=[{', '.join(list(map(hex, self.sws)))}], sw={hex(self.resp.sw)}, success={self.success}, error={self.error}, " \
f"keypair={self.keypair.name}, key={self.key.name}, params={self.parameters.name})"
@public
class ECDHResponse(Response): # pragma: no cover
"""A response to the ECDH and ECDH_direct KeyAgreement commands."""
def __init__(self, resp: ResponseAPDU, export: bool):
super().__init__(resp, 1, 1 if export else 0)
@property
def secret(self):
if len(self.params) != 0:
return self.params[0]
return None
def __repr__(self):
return f"{self.__class__.__name__}(sws=[{', '.join(list(map(hex, self.sws)))}], sw={hex(self.resp.sw)}, success={self.success}, error={self.error}, secret={hexlify(self.secret).decode() if self.secret else ''})"
@public
class ECDSAResponse(Response): # pragma: no cover
"""A response to the ECDSA and ECDSA sign and ECDSA verify commands."""
def __init__(self, resp: ResponseAPDU, export: bool):
super().__init__(resp, 1, 1 if export else 0)
@property
def signature(self):
if len(self.params) != 0:
return self.params[0]
return None
def __repr__(self):
return f"{self.__class__.__name__}(sws=[{', '.join(list(map(hex, self.sws)))}], sw={hex(self.resp.sw)}, success={self.success}, error={self.error}, sig={hexlify(self.signature).decode() if self.signature else | |
#!/usr/bin/env python3
# vim: sta:et:sw=4:ts=4:sts=4
"""
NAME
fermi_helper.py - Build and run Fermi HEP workflow using Docker/Singularity
SYNOPSIS
python3 fermi_helper.py build-docker-image [--tag TAG]
[--only-dependencies] [--pull-dependencies TAG] [--decaf-root ROOT]
[--decaf-repo REPO] [--decaf-repo-branch BRANCH]
python3 fermi_helper.py run-docker-image [--tag TAG] [--interactive]
python3 fermi_helper.py build-singularity-image [--tag TAG] [--sif SIF]
python3 fermi_helper.py run-singularity-image [--sif SIF] [--interactive]
EXAMPLE
Install Python dependency to run this script
$ python3 -m pip install --user jinja2
Build the Docker image, using pre-built dependencies
$ python3 fermi_helper.py build-docker-image --pull-dependencies thobson2/decaf-fermi:0.2.0-base
Run the workflow within Docker container
$ python3 fermi_helper.py run-docker-image
Build the dependencies and push them to DockerHub
$ python3 fermi_helper.py build-docker-image --only-dependencies --tag USERNAME/decaf-fermi:0.2.0-base
Run a shell within the Docker container interactively
$ python3 fermi_helper.py run-docker-image --interactive
        docker$ mpirun --hostfile hostfile_workflow.txt -np 4 ./decaf-henson_python
Convert Docker image to Singularity image
$ python3 fermi_helper.py build-singularity-image
Run Singularity image
$ python3 fermi_helper.py run-singularity-image
DEBUGGING
Run the Docker container interactively
        $ python3 fermi_helper.py run-docker-image --interactive
Run the workflow directly
docker$ mpirun --hostfile hostfile_workflow.txt -np 4 ./decaf-henson_python
To make things easier, install a text editor
docker$ apt-get update
docker$ apt-get install -y vim
To run the workflow in steps, first make a backup of the decaf-henson.json
file, before modifying it to remove the second half of the workflow (i.e.
the converter.py, approx.py, chi2.py, and new_box.py steps)
docker$ cp decaf-henson.json decaf-henson.json.bak
docker$ vi decaf-henson.json
Now run the workflow
docker$ mpirun --hostfile hostfile_workflow.txt -np 4 ./decaf-henson_python
Next, using the backup version of the decaf-henson.json file, extract the
command lines to run the second half step-by-step
docker$ grep cmdline decaf-henson.json.bak
Then modify each of the scripts to remove any reference the pyhenson
(specifically h.add, h.get, and h.yield_ functions)
docker$ vi converter.py # step = h.get("step") => step = 99
docker$ vi approx.py # nothing to change here
docker$ vi chi2.py # nothing to change here
docker$ vi new_box.py # nothing to change here
Now each command line can be run directly, after adding "python" to the
beginning, for example (the exact directories will be different)
docker$ python ./converter.py converter-henson.h5 /tmp/tmp.OrAXakGCKI/stage/examples/fermi_hep/deleteMe/
docker$ python ./approx.py converter-henson.h5 henson_approx.json
docker$ python ./chi2.py henson_approx.json exerimental_data.json
docker$ python ./new_box.py henson_approx.json.minimization newbox.json
NOTE: Anything done inside the container will be lost when the
"run-docker-image" command finishes. To make lasting changes, modify the
"decaf" directory from the host directory and re-run "build-docker-image".
DESCRIPTION
This script takes care of the build and execution process needed to run the
Fermi workflow using Decaf within Linux containers.
The build does everything within a container, which means that the entire
build process happens inside of Docker, and the image is, in a sense,
hermetically sealed away from the host system. The catch is that any change
to source code requires a complete re-build of all of Decaf and the
workflow, which can take up to 5 minutes.
Either build process happens within Docker first and uses a Dockerfile to
define the commands to be run. This Docker image can be used directly, or
can be converted into a Singularity image and then run using Singularity.
build-docker-image
Copy the source code into Docker and build Decaf and the Fermi
workflow.
run-docker-image
Run the workflow inside of Docker using the already-built Docker image.
build-singularity-image
Convert a Docker image into a Singularity image
run-singularity-image
Run the workflow inside of Singularity
OPTIONS
--tag TAG
Set the Docker image tag to be used. If named something like
USERNAME/IMAGENAME:VERSION, then the image will be pushed to a Docker
registry afterwards. Otherwise, a name like IMAGENAME:VERSION will only
be saved locally.
--sif SIF
Set the path to the Singularity image to be used.
--interactive
Instead of immediately running the workflow, open a shell into the
container to manually run the workflow and debug.
--decaf-root ROOT
Set the location of the decaf source code (including Fermi workflow).
--decaf-repo REPO
If the Decaf root directory doesn't exist, Decaf is first cloned using
this repo url.
--decaf-repo-branch BRANCH
The branch to be checked out after cloning Decaf (see --decaf-repo).
--only-dependencies
Only build the dependencies inside of Docker, without compiled Decaf.
--pull-dependencies TAG
Instead of building the whole set of dependencies, use the pre-built
image TAG.
FILES
go.sh
A helper script used in the Dockerfile to run CMake with the correct
arguments.
docker.env.sh
A helper script that sets some variables to be used inside the go.sh
script.
NOTES
The build-docker-image and run-docker-image commands require Docker to be
installed, but do not require Singularity. Likewise, build-singularity-image
and run-singularity-image require Singularity, but not Docker. This means that
those commands can be run on different machines (provided the image is pushed
to a registry; cf. the --tag option above with a "/" separator).
BUGS
Currently, even if Python source code is changed, the hermetic build and
run process will rebuild everything, despite it being unnecessary for an
interpreted script. This could be fixed in one of two ways: 1) copy the C++
source code first (and build it) and only copy the Python source code
afterwards, so that Python-only changes don't invalidate the compiled layers,
or 2) build and run incrementally.
CHANGELOG
v0.2.6, 16 April 2021
Add missing zlib package.
v0.2.5, 9 April 2021
Fix regression with Spack spec for py-h5py and hdf5 packages.
v0.2.4, 9 April 2021
Add missing pandas dependency.
v0.2.3, 23 March 2021
Update the version of apprentice within the repository to the latest
version from GitHub at this time (commit 6fbf53).
v0.2.2, 18 March 2021
Changed the default branch to the new "fermi-workflow" branch.
Added documentation on running the workflow interactively to aid in
debugging.
v0.2.1, 15 October 2020
Fixed a problem that caused template parametrization to fail to apply to
the workflow, which made pythia8-diy error out.
The root cause is that pythia8-diy can read the mb7tev.txt file from a
few different places: in the current directory and in a subdirectory
(under "deleteMe"). Although the new mb7tev.txt file is created, the
old one is not automatically removed, so the workflow reads the wrong
file.
In the previous version, a remnant of the old runtime-directory code was
still used: the file was moved from "$FERMI_PREFIX", which used to coincide
with the current directory. But now the current directory is nested under
/tmp, so the file actually in use wasn't moved, and in particular the old
one wasn't deleted. The file is now moved from the current directory
instead, which resolves the problem.
v0.2.0, 01 October 2020
Removed incremental building for now, until it has been tested more. Right
now only hermetic builds are supported, though the dependencies can now be
pre-built and saved to a Docker registry for reuse. This requires templating
the Dockerfile, hence the introduction of the jinja2 library dependency.
v0.1.0, 24 September 2020
First release with full support for hermetic builds and in-progress
support for incremental ones.
AUTHORS
<NAME> <<EMAIL>>
"""
from subprocess import run
from textwrap import dedent
from pathlib import Path
from jinja2 import Template
setupscript = dedent("""\
#ls -lah /.singularity.d/
. /etc/profile
#cat /.singularity.d/runscript -A
#set -euo pipefail
""")
hermeticscript = dedent("""\
#ls -lah /.singularity.d/
. /etc/profile
#cat /.singularity.d/runscript -A
set -euo pipefail
DECAF_PREFIX=/opt/decaf/stage
DECAF_HENSON_PREFIX=${DECAF_PREFIX:?}/examples/henson
FERMI_PREFIX=${DECAF_PREFIX:?}/examples/fermi_hep
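# run from a throw-away copy of the staged install under /tmp, so the
# original image contents are left untouched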
cd "$(TMPDIR=/tmp mktemp -d)"
tmpdir=$PWD
cp -r "${DECAF_PREFIX:?}" "${tmpdir:?}"
cd stage/examples/fermi_hep
echo $PWD
mkdir conf
mv mb7tev.txt conf/
cp hep-fullWorkflow-inputPre.json ./decaf-henson.json
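# rewrite the hard-coded install prefix in the workflow config so it points
# at the throw-away copy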
sed -ie 's!/home/oyildiz/mohan/fermi-workflow/install!'"$tmpdir/stage"'!g' ./decaf-henson.json
#sed -ie 's!\\./!'"${FERMI_PREFIX:?}/"'!g' ./decaf-henson.json
#cp "${FERMI_PREFIX:?}/hostfile_workflow.txt" ./hostfile_workflow.txt
cp ../henson/python/decaf-henson_python ./decaf-henson_python
LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+${LD_LIBRARY_PATH:?}:}${DECAF_PREFIX:?three}/lib
export DECAF_PREFIX LD_LIBRARY_PATH
ls -lah
""")
runscript = dedent("""\
mpirun --hostfile hostfile_workflow.txt -np 4 ./decaf-henson_python
""")
intscript = dedent("""\
exec bash
""")
dockerfile_template = Template(dedent("""\
{% if not pull_dependencies %}
# Build stage with Spack pre-installed and ready to be used
FROM spack/ubuntu-bionic:latest as builder
# What we want to install and how we want to install it
# is specified in a manifest file (spack.yaml)
RUN mkdir /opt/spack-environment \\
&& (echo "spack:" \\
&& echo " view: /opt/view" \\
&& echo " specs:" \\
&& echo " - boost" \\
&& echo " - cmake" \\
&& echo " - henson +mpi-wrappers +python ^mpich@3.3.2 ^python@3.8.2" \\
&& echo " - py-h5py ^hdf5@1.10.2+hl ^mpich@3.3.2 ^python@3.8.2" \\
&& | |
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0952683,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.71596,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.023673,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.221283,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.130836,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.053942,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.0870064,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0439179,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.184866,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0416339,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.13297,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0247177,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00226257,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.025102,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0167331,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0498196,
'Execution Unit/Register Files/Runtime Dynamic': 0.0189956,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0588024,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.159653,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.990174,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 4.22038e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 4.22038e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 3.69157e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 1.43762e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000240372,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000361695,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000399061,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0160859,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.02321,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0384691,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0546351,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.29138,
'Instruction Fetch Unit/Runtime Dynamic': 0.109951,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0433201,
'L2/Runtime Dynamic': 0.0123047,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.91804,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.343693,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0220295,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0220294,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.02207,
'Load Store Unit/Runtime Dynamic': 0.474364,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.054321,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.108641,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0192787,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0199267,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0636192,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00631411,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.252845,
'Memory Management Unit/Runtime Dynamic': 0.0262408,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.3321,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0650207,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.003225,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0265951,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming | |
is not None:
result['allowedMulti'] = self.allowed_multi
if self.approval_type is not None:
result['approvalType'] = self.approval_type
if self.approval_method is not None:
result['approvalMethod'] = self.approval_method
if self.actor_activate_type is not None:
result['actorActivateType'] = self.actor_activate_type
if self.required is not None:
result['required'] = self.required
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('actorKey') is not None:
self.actor_key = m.get('actorKey')
if m.get('actorType') is not None:
self.actor_type = m.get('actorType')
if m.get('actorSelectionType') is not None:
self.actor_selection_type = m.get('actorSelectionType')
if m.get('actorSelectionRange') is not None:
temp_model = ProcessForecastResponseBodyResultWorkflowActivityRulesWorkflowActorActorSelectionRange()
self.actor_selection_range = temp_model.from_map(m['actorSelectionRange'])
if m.get('allowedMulti') is not None:
self.allowed_multi = m.get('allowedMulti')
if m.get('approvalType') is not None:
self.approval_type = m.get('approvalType')
if m.get('approvalMethod') is not None:
self.approval_method = m.get('approvalMethod')
if m.get('actorActivateType') is not None:
self.actor_activate_type = m.get('actorActivateType')
if m.get('required') is not None:
self.required = m.get('required')
return self
class ProcessForecastResponseBodyResultWorkflowActivityRules(TeaModel):
def __init__(
self,
activity_id: str = None,
prev_activity_id: str = None,
activity_name: str = None,
activity_type: str = None,
is_target_select: bool = None,
workflow_actor: ProcessForecastResponseBodyResultWorkflowActivityRulesWorkflowActor = None,
):
# Activity (node) id
self.activity_id = activity_id
# Id of the previous activity in the workflow
self.prev_activity_id = prev_activity_id
# Activity name
self.activity_name = activity_name
# Rule type
self.activity_type = activity_type
# Whether this is a self-selected approval activity
self.is_target_select = is_target_select
# Actor information for this activity
self.workflow_actor = workflow_actor
def validate(self):
if self.workflow_actor:
self.workflow_actor.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.activity_id is not None:
result['activityId'] = self.activity_id
if self.prev_activity_id is not None:
result['prevActivityId'] = self.prev_activity_id
if self.activity_name is not None:
result['activityName'] = self.activity_name
if self.activity_type is not None:
result['activityType'] = self.activity_type
if self.is_target_select is not None:
result['isTargetSelect'] = self.is_target_select
if self.workflow_actor is not None:
result['workflowActor'] = self.workflow_actor.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('activityId') is not None:
self.activity_id = m.get('activityId')
if m.get('prevActivityId') is not None:
self.prev_activity_id = m.get('prevActivityId')
if m.get('activityName') is not None:
self.activity_name = m.get('activityName')
if m.get('activityType') is not None:
self.activity_type = m.get('activityType')
if m.get('isTargetSelect') is not None:
self.is_target_select = m.get('isTargetSelect')
if m.get('workflowActor') is not None:
temp_model = ProcessForecastResponseBodyResultWorkflowActivityRulesWorkflowActor()
self.workflow_actor = temp_model.from_map(m['workflowActor'])
return self
class ProcessForecastResponseBodyResultWorkflowForecastNodes(TeaModel):
def __init__(
self,
activity_id: str = None,
out_id: str = None,
):
# Activity (node) id
self.activity_id = activity_id
# Id of the activity's outgoing transition
self.out_id = out_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.activity_id is not None:
result['activityId'] = self.activity_id
if self.out_id is not None:
result['outId'] = self.out_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('activityId') is not None:
self.activity_id = m.get('activityId')
if m.get('outId') is not None:
self.out_id = m.get('outId')
return self
class ProcessForecastResponseBodyResult(TeaModel):
def __init__(
self,
is_forecast_success: bool = None,
process_code: str = None,
user_id: str = None,
process_id: int = None,
is_static_workflow: bool = None,
workflow_activity_rules: List[ProcessForecastResponseBodyResultWorkflowActivityRules] = None,
workflow_forecast_nodes: List[ProcessForecastResponseBodyResultWorkflowForecastNodes] = None,
):
# Whether the forecast succeeded
self.is_forecast_success = is_forecast_success
# Process code
self.process_code = process_code
# User id
self.user_id = user_id
# Process id
self.process_id = process_id
# Whether the workflow is static
self.is_static_workflow = is_static_workflow
self.workflow_activity_rules = workflow_activity_rules
self.workflow_forecast_nodes = workflow_forecast_nodes
def validate(self):
if self.workflow_activity_rules:
for k in self.workflow_activity_rules:
if k:
k.validate()
if self.workflow_forecast_nodes:
for k in self.workflow_forecast_nodes:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.is_forecast_success is not None:
result['isForecastSuccess'] = self.is_forecast_success
if self.process_code is not None:
result['processCode'] = self.process_code
if self.user_id is not None:
result['userId'] = self.user_id
if self.process_id is not None:
result['processId'] = self.process_id
if self.is_static_workflow is not None:
result['isStaticWorkflow'] = self.is_static_workflow
result['workflowActivityRules'] = []
if self.workflow_activity_rules is not None:
for k in self.workflow_activity_rules:
result['workflowActivityRules'].append(k.to_map() if k else None)
result['workflowForecastNodes'] = []
if self.workflow_forecast_nodes is not None:
for k in self.workflow_forecast_nodes:
result['workflowForecastNodes'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('isForecastSuccess') is not None:
self.is_forecast_success = m.get('isForecastSuccess')
if m.get('processCode') is not None:
self.process_code = m.get('processCode')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('processId') is not None:
self.process_id = m.get('processId')
if m.get('isStaticWorkflow') is not None:
self.is_static_workflow = m.get('isStaticWorkflow')
self.workflow_activity_rules = []
if m.get('workflowActivityRules') is not None:
for k in m.get('workflowActivityRules'):
temp_model = ProcessForecastResponseBodyResultWorkflowActivityRules()
self.workflow_activity_rules.append(temp_model.from_map(k))
self.workflow_forecast_nodes = []
if m.get('workflowForecastNodes') is not None:
for k in m.get('workflowForecastNodes'):
temp_model = ProcessForecastResponseBodyResultWorkflowForecastNodes()
self.workflow_forecast_nodes.append(temp_model.from_map(k))
return self
class ProcessForecastResponseBody(TeaModel):
def __init__(
self,
result: ProcessForecastResponseBodyResult = None,
):
# Returned result
self.result = result
def validate(self):
if self.result:
self.result.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.result is not None:
result['result'] = self.result.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('result') is not None:
temp_model = ProcessForecastResponseBodyResult()
self.result = temp_model.from_map(m['result'])
return self
class ProcessForecastResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ProcessForecastResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = ProcessForecastResponseBody()
self.body = temp_model.from_map(m['body'])
return self
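# Hypothetical usage sketch (not part of the generated SDK): deserializing a raw
# response payload into the model classes above via from_map().  The keys in
# `raw` are the wire-format names used by to_map()/from_map(), e.g.
# {'result': {'isForecastSuccess': True, 'processCode': 'PROC-XXX'}}.
def _example_parse_process_forecast(raw: dict) -> ProcessForecastResponseBody:
    body = ProcessForecastResponseBody()
    body.from_map(raw)
    return body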
class GrantCspaceAuthorizationHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GrantCspaceAuthorizationRequest(TeaModel):
def __init__(
self,
space_id: str = None,
type: str = None,
user_id: str = None,
duration_seconds: int = None,
ding_corp_id: str = None,
ding_org_id: int = None,
ding_isv_org_id: int = None,
ding_suite_key: str = None,
ding_token_grant_type: int = None,
):
# Approval attachment space id.
self.space_id = space_id
# Permission type.
self.type = type
# User id.
self.user_id = user_id
# How long the permission remains valid, in seconds.
self.duration_seconds = duration_seconds
self.ding_corp_id = ding_corp_id
self.ding_org_id = ding_org_id
self.ding_isv_org_id = ding_isv_org_id
self.ding_suite_key = ding_suite_key
self.ding_token_grant_type = ding_token_grant_type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.space_id is not None:
result['spaceId'] = self.space_id
if self.type is not None:
result['type'] = self.type
if self.user_id is not None:
result['userId'] = self.user_id
if self.duration_seconds is not None:
result['durationSeconds'] = self.duration_seconds
if self.ding_corp_id is not None:
result['dingCorpId'] = self.ding_corp_id
if self.ding_org_id is not None:
result['dingOrgId'] = self.ding_org_id
if self.ding_isv_org_id is not None:
result['dingIsvOrgId'] = self.ding_isv_org_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
if self.ding_token_grant_type is not None:
result['dingTokenGrantType'] = self.ding_token_grant_type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('spaceId') is not None:
self.space_id = m.get('spaceId')
if m.get('type') is not None:
self.type = m.get('type')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('durationSeconds') is not None:
self.duration_seconds = m.get('durationSeconds')
if m.get('dingCorpId') is not None:
self.ding_corp_id = m.get('dingCorpId')
if m.get('dingOrgId') is not None:
self.ding_org_id = m.get('dingOrgId')
if m.get('dingIsvOrgId') is not None:
self.ding_isv_org_id = m.get('dingIsvOrgId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
return self
class GrantCspaceAuthorizationResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
):
self.headers = headers
def validate(self):
self.validate_required(self.headers, 'headers')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
return self
class QueryAllProcessInstancesHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if | |
fieldsets = (
('Details', {
'classes': ('collapse',),
'fields': ('resultid',
'xlocation',
'xlocationunitsid',
'ylocation',
'ylocationunitsid',
'spatialreferenceid',
'intendedzspacing',
'intendedzspacingunitsid',
'intendedtimespacing',
'intendedtimespacingunitsid',
'aggregationstatisticcv',
)
}),
)
extra = 0
class ReadOnlyProfileResultsInline(ProfileResultsInline):
readonly_fields = ProfileResultsInline.fieldsets[0][1]['fields']
can_delete = False
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return False
class ResultsAdminForm(ModelForm):
# featureactionid = make_ajax_field(Featureactions,'featureactionid','featureaction_lookup',
# max_length=500)
featureactionid = AutoCompleteSelectField('featureaction_lookup', required=True, help_text='',
label='Sampling feature / location action',show_help_text =None)
def clean_featureactionid(self):
featureactioniduni = self.data['featureactionid']
featureactionid = None
for faiduni in featureactioniduni.split("-"):
if faiduni.isdigit():
featureactionid = faiduni
continue
featureaction = Featureactions.objects.filter(featureactionid=featureactionid).get()
return featureaction
class Meta:
model = Results
fields = '__all__'
# make_ajax_field doesn't work with the add + green plus on the field
# widgets = {
# 'featureactionid': autocomplete.ModelSelect2(url='featueactions-autocomplete')
# }
# The user can click, a popup window lets them create a new object, they click save,
# the popup closes and the AjaxSelect field is set.
# Your Admin must inherit from AjaxSelectAdmin
# http://django-ajax-selects.readthedocs.org/en/latest/Admin-add-popup.html
class ResultsAdmin(ReadOnlyAdmin): # admin.ModelAdmin
# The user can click, a popup window lets them create a new object,
# they click save, the popup closes and the AjaxSelect field is set.
# http://django-ajax-selects.readthedocs.org/en/latest/Admin-add-popup.html
# For readonly usergroup
user_readonly = [p.name for p in Results._meta.get_fields() if not p.one_to_many]
user_readonly_inlines = [ReadOnlyTimeseriesresultsInline,
ReadOnlyMeasurementResultsInline,
ReadOnlyProfileResultsInline]
# For admin users
form = ResultsAdminForm
inlines_list = [TimeseriesresultsInline, MeasurementResultsInline, ProfileResultsInline]
list_display = ['resultid', 'featureactionid', 'variableid', 'processing_level']
search_fields = ['variableid__variable_name__name', 'variableid__variablecode',
'variableid__variabledefinition',
'featureactionid__samplingfeatureid__samplingfeaturename',
'result_type__name', 'processing_level__definition']
actions = [duplicate_results_event]
save_as = True
def get_actions(self, request):
actions = super(ReadOnlyAdmin, self).get_actions(request)
if self.__user_is_readonly(request):
actions = list()
return actions
@staticmethod
def __user_is_readonly(request):
groups = [x.name for x in request.user.groups.all()]
return "readonly" in groups
class RelatedactionsAdminForm(ModelForm):
# actionid= ActionsModelChoiceField(Actions.objects.all().order_by('begindatetime'))
# relationshiptypecv= TermModelChoiceField(CvRelationshiptype.objects.all().order_by('term'))
# relatedactionid= ActionsModelChoiceField(Actions.objects.all().order_by('begindatetime'))
class Meta:
model = Relatedactions
fields = '__all__'
class RelatedactionsAdmin(ReadOnlyAdmin):
# For readonly usergroup
user_readonly = [p.name for p in Relatedactions._meta.get_fields() if not p.one_to_many]
user_readonly_inlines = list()
# For admin users
form = RelatedactionsAdminForm
inlines_list = list()
class OrganizationsAdminForm(ModelForm):
# organizationtypecv= TermModelChoiceField(CvOrganizationtype.objects.all().order_by('term'))
# parentorganizationid =OrganizationsModelChoiceField( Organizations.objects.all().
# order_by('organizationname'))
class Meta:
model = Organizations
fields = '__all__'
class OrganizationsAdmin(ReadOnlyAdmin):
# For readonly usergroup
user_readonly = [p.name for p in Organizations._meta.get_fields() if not p.one_to_many]
user_readonly_inlines = list()
# For admin users
form = OrganizationsAdminForm
inlines_list = list()
list_display = ('organizationname', 'organizationdescription', 'organization_link')
def organization_link(self, org):
return format_html('<a href="{0}" target="_blank">{0}</a>', org.organizationlink)
organization_link.allow_tags = True
class SamplingFeaturesInline(admin.StackedInline):
model = Samplingfeatures
extra = 0
class ActionsInline(admin.StackedInline):
model = Actions
fieldsets = (
('Details', {
'classes': ('collapse',),
'fields': ('actionid',
'action_type',
'method',
'begindatetime',
'begindatetimeutcoffset',
'enddatetime',
'enddatetimeutcoffset',
'actiondescription',
'actionfilelink',
)
}),
)
extra = 0
class ReadOnlyActionsInline(ActionsInline):
readonly_fields = ActionsInline.fieldsets[0][1]['fields']
can_delete = False
def has_add_permission(self, request):
return False
class DerivationequationsAdminForm(ModelForm):
derivationequation = CharField(max_length=255, label="derivation equation",
widget=forms.Textarea,
help_text='use python syntax if you are using this equation to derive new ' +
'values in ODM2 Admin as shown here' +
' https://en.wikibooks.org/wiki/Python_Programming/Basic_Math' +
' this currently supports 1 derived from field which should be x in the equation.' +
' the derived value must be stored in a variable y')
class Meta:
model = Derivationequations
fields = '__all__'
class DerivationequationsAdmin(ReadOnlyAdmin):
user_readonly = [p.name for p in Derivationequations._meta.get_fields() if not p.one_to_many]
user_readonly_inlines = list()
inlines_list = list()
# For admin users
form = DerivationequationsAdminForm
list_display = ['derivationequation', ]
# list_display_links = None
save_as = True
search_fields = ['derivationequationid','derivationequation']
class ResultderivationequationsAdminForm(ModelForm):
resultid = AutoCompleteSelectField('result_lookup', required=True,
help_text='result that is a product of this derivation equation',
label='Data result',show_help_text =None)
class Meta:
model = Resultderivationequations
fields = '__all__'
class ResultderivationequationsAdmin(ReadOnlyAdmin):
user_readonly = [p.name for p in Resultderivationequations._meta.get_fields() if not p.one_to_many]
user_readonly_inlines = list()
inlines_list = list()
# For admin users
form = ResultderivationequationsAdminForm
list_display = ['resultid', 'derivationequationid', ]
save_as = True
search_fields = [ 'resultid__variableid__variable_name__name',
'resultid__variableid__variablecode',
'resultid__variableid__variabledefinition',
'resultid__featureactionid__samplingfeatureid__samplingfeaturename',
'derivationequationid__derivationequation']
def create_derived_values_event(ModelAdmin, request, queryset):
StartDateProperty = Extensionproperties.objects.get(propertyname__icontains="start date")
EndDateProperty = Extensionproperties.objects.get(propertyname__icontains="end date")
qualitycode = CvQualitycode.objects.filter(name="Good").get()
censorcode = CvCensorcode.objects.filter(name="Not censored").get()
bulktimeseriesvalues = []
for relatedresults in queryset:
resultidtoderive = relatedresults.resultid #16678
tsrtoderive = Timeseriesresults.objects.get(resultid=resultidtoderive.resultid)
relatedresult = relatedresults.relatedresultid
relationshipType = relatedresults.relationshiptypecv
if not relationshipType.name == 'Is derived from':
raise forms.ValidationError("relationship type is not \'Is derived from\'")
try:
derivedenddaterepv = Resultextensionpropertyvalues.objects.filter(resultid=resultidtoderive.resultid).filter(
propertyid=EndDateProperty).get()
derivedenddate= derivedenddaterepv.propertyvalue
derivedstartdaterepv = Resultextensionpropertyvalues.objects.filter(resultid=resultidtoderive.resultid).filter(
propertyid=StartDateProperty).get()
derivedstartdate = derivedstartdaterepv.propertyvalue
except ObjectDoesNotExist:
derivedenddate='1800-01-01 00:00'
derivedstartdate='1800-01-01 00:00'
# values to derive from more recent then last derived value
fromvalues = Timeseriesresultvalues.objects.filter(resultid=relatedresult.resultid
).filter(valuedatetime__gt=derivedenddate)
# raise forms.ValidationError("derived end date: " + derivedenddate.propertyvalue +
# " derived resultid: " + str(resultidtoderive.resultid))
resultequation = Resultderivationequations.objects.filter(resultid=resultidtoderive.resultid).get()
equation = Derivationequations.objects.filter(derivationequationid=resultequation.derivationequationid.derivationequationid).get()
equationvalue = equation.derivationequation
y = 0
for vals in fromvalues:
x = vals.datavalue
d = dict(locals(), **globals())
# exec equationvalue in d
exec(equationvalue, d,d)
derivedvalue = d["y"]
# raise forms.ValidationError('original value: ' + str(x) + ' new value: ' + str(derivedvalue))
tsrv = Timeseriesresultvalues(
resultid=tsrtoderive,
datavalue=derivedvalue,
valuedatetime=vals.valuedatetime,
valuedatetimeutcoffset=4,
censorcodecv=censorcode,
qualitycodecv=qualitycode,
timeaggregationinterval=tsrtoderive
.intendedtimespacing,
timeaggregationintervalunitsid=tsrtoderive
.intendedtimespacingunitsid)
bulktimeseriesvalues.append(tsrv)
Timeseriesresultvalues.objects.bulk_create(bulktimeseriesvalues)
tsrvb = len(bulktimeseriesvalues)
newenddate = Timeseriesresultvalues.objects.filter(resultid=resultidtoderive.resultid).annotate(
Max('valuedatetime')). \
order_by('-valuedatetime')[0].valuedatetime.strftime('%Y-%m-%d %H:%M')
updateStartDateEndDate(resultidtoderive, derivedstartdate, newenddate)
messages.info(request, str(tsrvb) + " Derived time series values successfully created, ending on " + str(newenddate))
create_derived_values_event.short_description = "create derived values based on this relationship"
class RelatedresultsAdminForm(ModelForm):
resultid = AutoCompleteSelectField('result_lookup', required=True,
help_text='result',
label='Data result' ,show_help_text =None)
relatedresultid = AutoCompleteSelectField('result_lookup', required=True,
help_text='resulted related to first result',
label='Related data result' ,show_help_text =None)
class Meta:
model = Relatedresults
fields = '__all__'
class RelatedresultsAdmin(ReadOnlyAdmin):
# For readonly usergroup
user_readonly = [p.name for p in Relatedresults._meta.get_fields() if not p.one_to_many]
user_readonly_inlines = list()
# For admin users
form = RelatedresultsAdminForm
inlines_list = list()
actions = [create_derived_values_event]
list_display = ['resultid', 'relationshiptypecv', 'relatedresultid', 'versioncode',
'relatedresultsequencenumber']
save_as = True
search_fields = ['resultid__variableid__variable_name__name',
'resultid__variableid__variablecode',
'resultid__variableid__variabledefinition',
'resultid__featureactionid__samplingfeatureid__samplingfeaturename',
'relatedresultid__variableid__variable_name__name',
'relatedresultid__variableid__variablecode',
'relatedresultid__variableid__variabledefinition',
'relatedresultid__featureactionid__samplingfeatureid__samplingfeaturename']
class FeatureactionsAdminForm(ModelForm):
samplingfeatureid = AutoCompleteSelectField('sampling_feature_lookup', required=True, help_text='',
label='Sampling feature',show_help_text =None)
class Meta:
model = Featureactions
fields = '__all__'
class FeatureactionsAdmin(ReadOnlyAdmin):
# For readonly usergroup
user_readonly = [p.name for p in Featureactions._meta.get_fields() if not p.one_to_many]
user_readonly_inlines = list()
# For admin users
form = FeatureactionsAdminForm
inlines_list = list()
list_display = ['samplingfeatureid', 'action', ]
save_as = True
search_fields = ['action__method__methodname', 'samplingfeatureid__samplingfeaturename']
class DatasetsAdminForm(ModelForm):
datasetabstract = forms.CharField(max_length=5000, widget=forms.Textarea)
class Meta:
model = Datasets
fields = '__all__'
class DatasetsAdmin(ReadOnlyAdmin):
# For readonly usergroup
user_readonly = [p.name for p in Datasets._meta.get_fields() if not p.one_to_many]
user_readonly_inlines = list()
# For admin users
form = DatasetsAdminForm
inlines_list = list()
list_display = ['datasetcode', 'datasettitle', 'datasettypecv']
def get_datasetsresults(self, object_id):
datasetResults = Datasetsresults.objects.filter(datasetid=object_id)
# raise ValidationError(datasetResults)
return datasetResults
def get_results(self, object_id):
ids = []
datasetResults = Datasetsresults.objects.filter(datasetid=object_id)
for result in datasetResults:
ids += [result.resultid.resultid]
resultsList = Results.objects.filter(resultid__in=ids)
# raise ValidationError(datasetResults)
# return queryset.filter(resultid__in=ids)
return resultsList
# change_view adds the dataset's results to the template context so the custom change template can list them
def change_view(self, request, object_id, form_url='', extra_context=None):
extra_context = extra_context or {}
extra_context['DatasetResultsList'] = self.get_datasetsresults(object_id)
extra_context['ResultsList'] = self.get_results(object_id)
extra_context['prefixpath'] = settings.CUSTOM_TEMPLATE_PATH
return super(DatasetsAdmin, self).change_view(request, object_id, form_url,
extra_context=extra_context)
# class AffiliationsAdminForm(ModelForm):
# class Meta:
# model = Affiliations
# fields = '__all__'
#
#
# class AffiliationsAdmin(admin.ModelAdmin):
# form = AffiliationsAdminForm
class ActionsAdminForm(ModelForm):
actiondescription = CharField(max_length=5000, label="Action description",
widget=forms.Textarea, required=False)
action_type = make_ajax_field(Actions, 'action_type', 'cv_action_type')
action_type.help_text = u'A vocabulary for describing the type of actions performed in ' \
u'making observations. Depending' \
u' on the action type, the action may or may not produce an ' \
u'observation result. view action type ' \
u'details here <a href="http://vocabulary.odm2.org/actiontype/" ' \
u'target="_blank">http://vocabulary.odm2.org/actiontype/</a>'
action_type.allow_tags = True
class Meta:
model = Actions
fields = '__all__'
class ActionsAdmin(ReadOnlyAdmin):
# For readonly usergroup
user_readonly = [p.name for p in Actions._meta.get_fields() if not p.one_to_many]
user_readonly_inlines = [ReadOnlyFeatureActionsInline]
# For admin users
form = ActionsAdminForm
inlines_list = [FeatureActionsInline, actionByInLine]
def method_link(self, obj):
return u'<a href="{0}methods/{1}/">{2}</a>'.format(settings.CUSTOM_TEMPLATE_PATH,
obj.method.methodid, obj.method.methodname)
list_display = ('action_type', 'method_link', 'begindatetime', 'enddatetime')
list_display_links = ('action_type',)
search_fields = ['action_type__name', 'method__methodname'] # ,
method_link.short_description = 'Method'
method_link.allow_tags = True
save_as = True
class ActionByAdminForm(ModelForm):
class Meta:
model = Actionby
fields = '__all__'
class ActionByAdmin(ReadOnlyAdmin):
# For readonly usergroup
user_readonly = [p.name for p in Actionby._meta.get_fields() if not p.one_to_many]
user_readonly_inlines = list()
# For admin users
form = ActionByAdminForm
inlines_list = list()
list_display = ('actionid', 'affiliationid')
# list_display_links = ('affiliationid', 'actionid')
# list_select_related = True
class MethodsAdminForm(ModelForm):
methoddescription = CharField(max_length=5000, label="Method description",
widget=forms.Textarea, required=False)
methodtypecv = make_ajax_field(Methods, 'methodtypecv', 'cv_method_type')
methodtypecv.help_text = u'A vocabulary for describing types of Methods associated ' \
u'with creating observations. ' \
u'MethodTypes correspond with ActionTypes in ODM2. ' \
u'An Action must be performed using an ' \
| |
9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name,
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([b_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[shape_constant_name, a_constant_name, b_constant_name])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
# Verify the concat is quantized.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([concat_name])
ops = [node.op for node in eightbit_graph_def.node]
self.assertEqual(1, ops.count("QuantizedConcat"))
def test_multiple_outputs(self):
input_constant_name = "input_constant"
split_constant_name = "split_constant"
split_name = "split"
concat_constant_name = "concat_constant"
concat_name = "concat"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
split_constant = quantize_graph.create_constant_node(
split_constant_name, value=1, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([split_constant])
split_node = quantize_graph.create_node(
"Split", split_name, [split_constant_name, input_constant_name])
quantize_graph.set_attr_int(split_node, "num_split", 2)
quantize_graph.set_attr_dtype(split_node, "T", dtypes.float32)
float_graph_def.node.extend([split_node])
concat_constant = quantize_graph.create_constant_node(
concat_constant_name, value=1, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([concat_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[concat_constant_name, split_name + ":0", split_name + ":1"])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
def test_node_name_from_input(self):
self.assertEqual("SomeName",
quantize_graph.node_name_from_input("^SomeName:2"))
def test_unique_node_name_from_input(self):
self.assertEqual("__hat__SomeName__port__2",
quantize_graph.unique_node_name_from_input("^SomeName:2"))
def test_identity(self):
input_constant_name = "input_constant"
identity_name = "identity"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
identity_node = quantize_graph.create_node("Identity", identity_name,
[input_constant_name])
quantize_graph.set_attr_dtype(identity_node, "T", dtypes.float32)
float_graph_def.node.extend([identity_node])
mul_name = "mul"
mul_node = quantize_graph.create_node("Mul", mul_name,
[identity_name, identity_name])
quantize_graph.set_attr_dtype(mul_node, "T", dtypes.float32)
float_graph_def.node.extend([mul_node])
test_graph(float_graph_def, {}, [mul_name])
def test_keep_control_edges(self):
no_op_name = "no_op"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = graph_pb2.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
graph_def.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = quantize_graph.create_node("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = quantize_graph.create_node(
"Identity", a_identity_name,
[a_constant_name, "^" + a_check_name, "^" + no_op_name])
graph_def.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = quantize_graph.create_node("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = quantize_graph.create_node(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name, b_identity_name])
quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
expected_output = graph_pb2.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
expected_output.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
a_identity_node = quantize_graph.create_node(
"Identity", a_identity_name, [a_constant_name, "^" + no_op_name])
expected_output.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name, b_constant_name])
quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
expected_output.versions.CopyFrom(graph_def.versions)
expected_output.library.CopyFrom(graph_def.library)
output = graph_util.remove_training_nodes(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [add_name])
self.assertProtoEquals(expected_output, stripped_output)
def test_batch_norm(self):
input_constant_name = "input_constant"
mean_constant_name = "mean_constant"
variance_constant_name = "variance_constant"
beta_constant_name = "beta_constant"
gamma_constant_name = "gamma_constant"
batch_norm_name = "batch_norm"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6],
dtype=dtypes.float32,
shape=[1, 1, 6, 2])
float_graph_def.node.extend([input_constant])
mean_constant = quantize_graph.create_constant_node(
mean_constant_name, value=[10, 20], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([mean_constant])
variance_constant = quantize_graph.create_constant_node(
variance_constant_name,
value=[0.25, 0.5],
dtype=dtypes.float32,
shape=[2])
float_graph_def.node.extend([variance_constant])
beta_constant = quantize_graph.create_constant_node(
beta_constant_name, value=[0.1, 0.6], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([beta_constant])
gamma_constant = quantize_graph.create_constant_node(
gamma_constant_name, value=[0, 0], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([gamma_constant])
batch_norm_node = quantize_graph.create_node(
"BatchNormWithGlobalNormalization", batch_norm_name, [
input_constant_name, mean_constant_name, variance_constant_name,
beta_constant_name, gamma_constant_name
])
quantize_graph.set_attr_dtype(batch_norm_node, "T", dtypes.float32)
quantize_graph.set_attr_bool(batch_norm_node, "scale_after_normalization",
False)
quantize_graph.set_attr_float(batch_norm_node, "variance_epsilon", 0.001)
float_graph_def.node.extend([batch_norm_node])
test_graph(float_graph_def, {}, [batch_norm_name])
def test_max_pool(self):
input_constant_name = "input_constant"
max_pool_name = "max_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
max_pool_node = quantize_graph.create_node("MaxPool", max_pool_name,
[input_constant_name])
quantize_graph.set_attr_int_list(max_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(max_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(max_pool_node, "padding", b"SAME")
float_graph_def.node.extend([max_pool_node])
test_graph(float_graph_def, {}, [max_pool_name])
def test_avg_pool(self):
input_constant_name = "input_constant"
avg_pool_name = "avg_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
avg_pool_node = quantize_graph.create_node("AvgPool", avg_pool_name,
[input_constant_name])
quantize_graph.set_attr_dtype(avg_pool_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(avg_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(avg_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(avg_pool_node, "padding", b"SAME")
float_graph_def.node.extend([avg_pool_node])
test_graph(float_graph_def, {}, [avg_pool_name])
def test_relu(self):
input_constant_name = "input_constant"
relu_name = "relu"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu_node = quantize_graph.create_node("Relu", relu_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
float_graph_def.node.extend([relu_node])
test_graph(float_graph_def, {}, [relu_name])
def test_relu_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
relu_node = quantize_graph.create_node("Relu", "relu", [input_node.name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=0, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=12, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
[relu_node.name, min_node.name, max_node.name])
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend(
[input_node, relu_node, min_node, max_node, fake_quant_node])
test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
# Verify there is only one Quantize and one Requantize op.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
ops = [node.op for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
def test_relu6(self):
input_constant_name = "input_constant"
relu6_name = "relu6"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu6_node = quantize_graph.create_node("Relu6", relu6_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu6_node, "T", dtypes.float32)
float_graph_def.node.extend([relu6_node])
test_graph(float_graph_def, {}, [relu6_name])
def test_bias_add(self):
input_constant_name = "input_constant"
offset_constant_name = "offset_constant"
bias_add_name = "bias_add"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 1, 2, 6])
float_graph_def.node.extend([input_constant])
offset_constant = quantize_graph.create_constant_node(
offset_constant_name,
value=[1, 2, 3, 4, 5, 6],
dtype=dtypes.float32,
shape=[6])
float_graph_def.node.extend([offset_constant])
bias_add_node = quantize_graph.create_node(
"BiasAdd", bias_add_name, [input_constant_name, offset_constant_name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def.node.extend([bias_add_node])
test_graph(float_graph_def, {}, [bias_add_name])
def test_quantized_input_range_errors(self):
with self.assertRaises(ValueError):
# Invalid mode.
quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "weights_rounded",
[0, 1])
with self.assertRaises(ValueError):
# Invalid range.
quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "eightbit", [0, -1])
def test_quantized_input_range_bias_add(self):
input_shape = [1, 1, 2, 6]
input_n = quantize_graph.create_node("Placeholder", "input", [])
quantize_graph.set_attr_dtype(input_n, "dtype", dtypes.float32)
quantize_graph.set_attr_shape(input_n, "shape", input_shape)
offset_n = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6])
bias_add_n = quantize_graph.create_node("BiasAdd", "bias_add",
[input_n.name, offset_n.name])
quantize_graph.set_attr_dtype(bias_add_n, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([input_n, offset_n, bias_add_n])
input_map = {
input_n.name + ":0":
np.reshape([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], input_shape)
}
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[bias_add_n.name], [-1, 20.])
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[bias_add_n.name], [0, 12.])
def test_quantized_input_range_mat_mul(self):
shapes = [[3, 2], [2, 4]]
inputs = []
for i, shape in enumerate(shapes):
node = quantize_graph.create_node("Placeholder", "input_%s" % i, [])
quantize_graph.set_attr_dtype(node, "dtype", dtypes.float32)
quantize_graph.set_attr_shape(node, "shape", shape)
inputs.append(node)
mat_mul_node = quantize_graph.create_node("MatMul", "mat_mul",
[n.name for n in inputs])
quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend(inputs + [mat_mul_node])
input_map = {
inputs[0].name + ":0":
np.reshape([1, 2, 3, 4, 5, 6], shapes[0]),
inputs[1].name + ":0":
np.reshape([.8, .7, .6, .5, .4, .3, .2, .1], shapes[1])
}
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[mat_mul_node.name], [-1, 20.])
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[mat_mul_node.name], [0, 6.])
def _RunTestsForQuantizedInputRange(self, float_graph_def, input_map,
output_names, input_range):
if sys.version_info[0] == 3:
# uint8->quint8 conversion for numpy is not working currently.
return
quantized_input_map = {}
for k, v in input_map.items():
arr = [
int(
round((n - input_range[0]) * 255 / (input_range[1] - input_range[
0]))) for n in v.flat
]
arr = np.array(arr, np.uint8)
arr = arr.reshape(v.shape)
arr = arr.astype(dtypes.quint8.as_numpy_dtype)
quantized_input_map[k] = arr
output_tensors = [output_name + ":0" for output_name in output_names]
float_results = run_graph_def(float_graph_def, input_map, output_tensors)
# Quantize treating the input as quantized in range <input_range>.
rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit",
input_range)
graph_def = rewriter.rewrite(output_names)
results = run_graph_def(graph_def, quantized_input_map, output_tensors)
for expected, result in zip(float_results, results):
assert are_tensors_near(expected, result, .5)
ops = [node.op for node in graph_def.node]
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(len(output_names), ops.count("Dequantize"))
# Quantize without treating input as quantized.
rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
graph_def = rewriter.rewrite(output_names)
results = run_graph_def(graph_def, input_map, output_tensors)
for expected, result in zip(float_results, results):
assert are_tensors_near(expected, result, .5)
ops = [node.op for node in graph_def.node]
self.assertEqual(
len(input_map), ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(len(output_names), ops.count("Dequantize"))
def test_bias_add_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=-.5, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=15.5, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
        [bias_add_node.name, min_node.name, max_node.name])
oprot.writeBinary(iter13)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(readlines_result)
readlines_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRING, 'BINARY', False), None, ), # 0
(1, TType.STRUCT, 'e', [FileSystemException, None], None, ), # 1
)
class write_args(object):
"""
Attributes:
- filedesc
- data
"""
def __init__(self, filedesc=None, data=None,):
self.filedesc = filedesc
self.data = data
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.filedesc = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.data = iprot.readBinary()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('write_args')
if self.filedesc is not None:
oprot.writeFieldBegin('filedesc', TType.STRING, 1)
oprot.writeString(self.filedesc.encode('utf-8') if sys.version_info[0] == 2 else self.filedesc)
oprot.writeFieldEnd()
if self.data is not None:
oprot.writeFieldBegin('data', TType.STRING, 2)
oprot.writeBinary(self.data)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(write_args)
write_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'filedesc', 'UTF8', None, ), # 1
(2, TType.STRING, 'data', 'BINARY', None, ), # 2
)
class write_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = FileSystemException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('write_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(write_result)
write_result.thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', [FileSystemException, None], None, ), # 1
)
class writelines_args(object):
"""
Attributes:
- filedesc
- lines
"""
def __init__(self, filedesc=None, lines=None,):
self.filedesc = filedesc
self.lines = lines
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.filedesc = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.lines = []
(_etype17, _size14) = iprot.readListBegin()
for _i18 in range(_size14):
_elem19 = iprot.readBinary()
self.lines.append(_elem19)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('writelines_args')
if self.filedesc is not None:
oprot.writeFieldBegin('filedesc', TType.STRING, 1)
oprot.writeString(self.filedesc.encode('utf-8') if sys.version_info[0] == 2 else self.filedesc)
oprot.writeFieldEnd()
if self.lines is not None:
oprot.writeFieldBegin('lines', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.lines))
for iter20 in self.lines:
oprot.writeBinary(iter20)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(writelines_args)
writelines_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'filedesc', 'UTF8', None, ), # 1
(2, TType.LIST, 'lines', (TType.STRING, 'BINARY', False), None, ), # 2
)
class writelines_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = FileSystemException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('writelines_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(writelines_result)
writelines_result.thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', [FileSystemException, None], None, ), # 1
)
class tell_args(object):
"""
Attributes:
- filedesc
"""
def __init__(self, filedesc=None,):
self.filedesc = filedesc
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.filedesc = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('tell_args')
if self.filedesc is not None:
oprot.writeFieldBegin('filedesc', TType.STRING, 1)
oprot.writeString(self.filedesc.encode('utf-8') if sys.version_info[0] == 2 else self.filedesc)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(tell_args)
tell_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'filedesc', 'UTF8', None, ), # 1
)
class tell_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = FileSystemException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('tell_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(tell_result)
tell_result.thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', [FileSystemException, None], None, ), # 1
)
class seek_args(object):
"""
Attributes:
- filedesc
- position
"""
def __init__(self, filedesc=None, position=None,):
self.filedesc = filedesc
self.position = position
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.filedesc = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.position = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0
1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
Element: 189
Faces:
-1 -1 -1 -1 -1 -1
Nodes:
204 212 205 213 300 308 301 309
Scale factors:
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0
1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
Element: 190
Faces:
-1 -1 -1 -1 -1 -1
Nodes:
205 213 206 214 301 309 302 310
Scale factors:
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0
1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
Element: 191
Faces:
-1 -1 -1 -1 -1 -1
Nodes:
206 214 207 215 302 310 303 311
Scale factors:
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0
1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
Element: 192
Faces:
-1 -1 -1 -1 -1 -1
Nodes:
207 215 208 216 303 311 304 312
Scale factors:
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0
1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
Element: 193
Faces:
-1 -1 -1 -1 -1 -1
Nodes:
216 224 209 217 312 320 305 313
Scale factors:
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0
1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
Element: 194
Faces:
-1 -1 -1 -1 -1 -1
Nodes:
209 217 210 218 305 313 306 314
Scale factors:
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0
1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
Element: 195
Faces:
-1 -1 -1 -1 -1 -1
Nodes:
210 218 211 219 306 314 307 315
Scale factors:
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0
1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
Element: 196
Faces:
-1 -1 -1 -1 -1 -1
Nodes:
211 219 212 220 307 315 308 316
Scale factors:
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0
1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
Element: 197
Faces:
-1 -1 -1 -1 -1 -1
Nodes:
212 220 213 221 308 316 309 317
Scale factors:
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0
1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
Element: 198
Faces:
-1 -1 -1 -1 -1 -1
Nodes:
213 221 214 222 309 317 310 318
Scale factors:
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0
1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
Element: 199
Faces:
-1 -1 -1 -1 -1 -1
Nodes:
214 222 215 223 310 318 311 319
Scale factors:
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0
1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
Element: 200
Faces:
-1 -1 -1 -1 -1 -1
Nodes:
215 223 216 224 311 319 312 320
Scale factors:
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0
1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
Element: 201
Faces:
-1 -1 -1 -1 -1 -1
Nodes:
224 232 217 225 320 328 313 321
Scale factors:
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0 1.0
  1.0 1.0 1.0
# qPyMultiThread.py
# -*- coding=iso-8859-1 -*-
# Copyright 2019 Qualys Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
-------------------------------------------------------------------
qPyMultiThread.py
-------------------------------------------------------------------
This tool is an example of using the practices outlined in the
Qualys v2 API User Guide under the Best Practices Section
https://www.qualys.com/docs/qualys-api-vmpc-user-guide.pdf
Recommendations: To improve performance, multi-threading should be used.
Here is an outline of what the POC multi-threading script does to obtain
the maximum throughput:
1. Make an initial API call to the Host List API endpoint to retrieve all host IDs for the subscription that
need to have data retrieved.
    Note: It's important to do any filtering on hosts at this point, as filtering during the detection pull
can impact performance. Host List API Endpoint:
https://<qualysapi url>/api/2.0/fo/asset/host/
2. Break the total Host IDs into batches of 1,000-5,000 and send to a Queue.
3. Launch X worker threads that will pull the batches from the Queue and launch an API call against:
    https://<qualysapi url>/api/2.0/fo/asset/host/vm/detection/ Using Parameters:
params = dict(
action='list',
show_igs=0,
show_reopened_info=1,
active_kernels_only=1,
output_format='XML',
status='Active,Re-Opened,New',
vm_processed_after=<Date in UTC>, # Formatted as: '2019-04-05T00:00:01Z'
truncation_limit = 0,
ids=ids
)
Considerations
Batch size
On the backend, the host detection engine will break up the number of hosts to retrieve information on
with a maximum size of 10,000. Using a batch size higher than this will not add any benefit to performance. In the
same context, there are multiple places that need to pull information so there is an overhead cost regardless of the
size being used. For that reason, using a batch size too small can start to hinder performance slightly due to the
overhead being used on small requests. Different parameters and the amount of total data on the backend can make
requests vary in duration, so it is best to experiment with different batch sizes during peak and non-peak hours to
determine the optimal size to use.
Error Handling
Robust error handling and logging is key to any automation and is recommended to implement mechanisms
to catch exceptions and retry with exponential back off when errors are encountered. This includes all functions
dealing with connection requests, parsing, or writing to disk. Taking care to log as much precise detail as possible
so it will be easier to audit later should the need arise.
Parsing
If an error is encountered, the API will return an error code and a description of the error,
which will look like this:
Simple Return with error:
<?xml version="1.0" encoding="UTF-8" ?>
    <!DOCTYPE SIMPLE_RETURN SYSTEM "https://qualysapi.qualys.com/api/2.0/simple_return.dtd">
<SIMPLE_RETURN>
<RESPONSE>
<DATETIME>2018-02-14T02:51:36Z</DATETIME>
<CODE>1234</CODE>
<TEXT>Description of Error</TEXT>
</RESPONSE>
</SIMPLE_RETURN>
Generic Return with error:
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE GENERIC_RETURN SYSTEM "https://qualysapi.qualys.com/generic_return.dtd">
<GENERIC_RETURN>
    <API name="index.php" username="username" at="2018-02-13T06:09:27Z">
<RETURN status="FAILED" number="999">Internal error. Please contact customer support.</RETURN>
</GENERIC_RETURN>
<!-- Incident signature: 123a12b12c1de4f12345678901a12a12 //-->
A full list of Error code Responses can be found
in the API User Guide in Appendix 1
https://www.qualys.com/docs/qualys-api-vmpc-user-guide.pdf
Connection Errors
With retrieving large amounts of data sets and continuously streaming through the API for prolonged periods of time,
comes the possibility of running into edge cases with regards to connections. Whichever method is used to make the
outbound connection to the API endpoint, it is recommended to set a timeout to abort/retry a connection if it hasn't
been established in a reasonable amount of time. This is to prevent stalling out a thread, resulting in reduced
performance. Also consider these types of connection errors, amongst others:
- Empty Responses
- Timeouts
- Connection Reset or Internal Error responses. Status codes: 503, 500.
- Connection Closed
These can be caused by either side of the connection, so need to be caught, logged,
and if they continue then investigated.
"""
# ---------
# Library Imports
# ---------
import copy
import socket  # used below by HTTPSConnectionWithKeepAlive to set keep-alive socket options
import ssl
import sys
import time
from optparse import IndentedHelpFormatter, OptionGroup, OptionParser, textwrap
from random import randint
from threading import Thread, current_thread
import ipaddress
from ipaddress import NetmaskValueError, AddressValueError
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
# prior to Python 2.5, these were separate modules
import sha
import md5
_sha = sha.new
_md5 = md5.new
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
# Check Python version so we can import the appropriate libraries
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
if is_py2:
import Queue as queue
from urllib2 import urlopen, ProxyHandler, build_opener, install_opener, Request, HTTPSHandler, HTTPHandler
from urllib2 import HTTPError, URLError
from httplib import HTTPSConnection, HTTPException
str = unicode
if is_py3:
import queue
from builtins import object
from builtins import next
from builtins import range
from builtins import str
from future.moves.urllib.request import urlopen, ProxyHandler, build_opener, install_opener
from future.moves.urllib.request import Request, HTTPSHandler, HTTPHandler
from future.moves.urllib.error import HTTPError, URLError
from http.client import HTTPSConnection
from http.client import HTTPException
from builtins import input as raw_input
str = str
# ---------
# Local Imports
# ---------
import lib
from lib.APIResponse import APIResponse, APIResponseError, XMLFileBufferedResponse
from lib.APIRequest import APIRequest
from lib.configuration import Configuration
from lib.loghandler import *
from lib import utils
logHandler = logHandler('qPyMultiThread')
init = Configuration()
init.setupFileStructure()
NORETRYCODES = [400]
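# ---------
# Illustrative helpers (sketch)
# ---------
# The module docstring above recommends batching host IDs onto a queue drained
# by worker threads, retrying failed requests with exponential back off, and
# parsing SIMPLE_RETURN error bodies.  The helpers below are minimal sketches
# of those ideas only; the names (chunk_ids, run_workers, call_with_backoff,
# parse_simple_return_error) and their defaults are illustrative assumptions,
# not part of the original tool, and they rely on the imports above.
def chunk_ids(host_ids, batch_size=2000):
    """Yield comma-separated ID batches suitable for the ids= parameter."""
    for start in range(0, len(host_ids), batch_size):
        yield ','.join(str(i) for i in host_ids[start:start + batch_size])
def run_workers(batches, fetch_batch, num_threads=4):
    """Drain ID batches from a Queue using num_threads worker threads."""
    work = queue.Queue()
    for batch in batches:
        work.put(batch)
    def _worker():
        while True:
            try:
                ids = work.get_nowait()
            except queue.Empty:
                return
            try:
                fetch_batch(ids)
            finally:
                work.task_done()
    threads = [Thread(target=_worker) for _ in range(num_threads)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
def call_with_backoff(func, max_attempts=5, base_delay=2, no_retry_codes=NORETRYCODES):
    """Call func() and retry transient failures with exponential back off."""
    for attempt in range(1, max_attempts + 1):
        try:
            return func()
        except HTTPError as e:
            # Codes such as 400 indicate a permanent failure; do not retry those.
            if e.code in no_retry_codes or attempt == max_attempts:
                raise
        except (URLError, HTTPException):
            if attempt == max_attempts:
                raise
        # Sleep 2, 4, 8, ... seconds plus a little jitter before retrying.
        time.sleep(base_delay * (2 ** (attempt - 1)) + randint(0, 1000) / 1000.0)
def parse_simple_return_error(xml_text):
    """Return (code, text) from a SIMPLE_RETURN error body, or None."""
    try:
        root = ET.fromstring(xml_text)
    except Exception:
        # Not well-formed XML (for example an empty response body).
        return None
    code = root.findtext('.//CODE')
    text = root.findtext('.//TEXT')
    return (code, text) if (code or text) else None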
class HTTPSConnectionWithKeepAlive(HTTPSConnection):
"""TCP KEEPALIVE
In order to set tcp keepalive we need to subclass HTTPSHandler
Here is the source code for HTTPSHandler from urllib2 github repo
https://github.com/python/cpython/blob/2.7/Lib/urllib2.py
class HTTPSHandler(AbstractHTTPHandler):
def __init__(self, debuglevel=0, context=None):
AbstractHTTPHandler.__init__(self, debuglevel)
self._context = context
def https_open(self, req):
return self.do_open(httplib.HTTPSConnection, req,
context=self._context)
https_request = AbstractHTTPHandler.do_request_
As urllib2.HTTPSHandler uses httplib.HTTPSConnection we need to subclass this also.
The connect method would create the socket.
def connect(self):
"Connect to a host on a given (SSL) port."
HTTPConnection.connect(self)
if self._tunnel_host:
server_hostname = self._tunnel_host
else:
server_hostname = self.host
self.sock = self._context.wrap_socket(self.sock,
server_hostname=server_hostname)
This is the method we would need to add the socket option for keep-alive.
    Now, one of the challenges with low-level TCP settings is that
    the TCP stack for each OS has different settings.
Usage of the new Handler:
http_handler = HTTPSHandlerWithKeepAlive()
opener = urllib2.build_opener(http_handler)
urllib2.install_opener(opener)
"""
def connect(self):
HTTPSConnection.connect(self)
keepalive_idle_sec = 50
keepalive_interval_sec = 10
keep_alive_max_fail = 25
# Identify each OS and set the socket options
# All possible values:
# https://docs.python.org/2/library/sys.html#sys.platform
if sys.platform.startswith('linux'):
            # LINUX is pretty straightforward
# setsockopt supports all the values
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, keepalive_idle_sec)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, keepalive_interval_sec)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, keep_alive_max_fail)
elif sys.platform.startswith('darwin'):
            # MAC OSX - is similar to linux but the only problem is that
# on OSX python socket module does not export TCP_KEEPIDLE,TCP_KEEPINTVL,TCP_KEEPCNT constant.
# Taking the value for TCP_KEEPIDLE from darwin tcp.h
# https://github.com/apple/darwin-xnu/blob/master/bsd/netinet/tcp.h
# define TCP_KEEPALIVE 0x10 /* idle time used when SO_KEEPALIVE is enabled */
# define TCP_KEEPINTVL 0x101 /* interval between keepalives */
# define TCP_KEEPCNT 0x102 /* number of keepalives before close */
# TCP_KEEPINTVL and TCP_KEEPCNT were added 5 years ago. So, older OSX would not support it.
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
self.sock.setsockopt(socket.IPPROTO_TCP, 0x10, keepalive_idle_sec)
self.sock.setsockopt(socket.IPPROTO_TCP, 0x101, keepalive_interval_sec)
self.sock.setsockopt(socket.IPPROTO_TCP, 0x102, keep_alive_max_fail)
elif sys.platform.startswith('win'):
            # WINDOWS - To set TCP keepalive on Windows we need to use sock.ioctl; more info can be found here
# https://msdn.microsoft.com/en-us/library/dd877220%28v=vs.85%29.aspx
# The time is in milliseconds
self.sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, keepalive_idle_sec * 1000, keepalive_interval_sec * 1000))
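# Illustrative sketch (assumption): the docstring of HTTPSConnectionWithKeepAlive
# above shows how urllib's HTTPSHandler opens connections via do_open().  A
# handler that actually uses the keep-alive connection class could look like
# the class below; the name HTTPSHandlerWithKeepAlive mirrors the usage example
# in that docstring and is not guaranteed to match the rest of the original module.
class HTTPSHandlerWithKeepAlive(HTTPSHandler):
    def https_open(self, req):
        # Hand do_open() the keep-alive connection class instead of the default one.
        context = getattr(self, '_context', None)
        if context is not None:
            return self.do_open(HTTPSConnectionWithKeepAlive, req, context=context)
        return self.do_open(HTTPSConnectionWithKeepAlive, req)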
class Formatter(IndentedHelpFormatter):
def format_description(self, description):
if not description:
return ""
desc_width = 150 - self.current_indent
indent = " " * self.current_indent
# the above is still the same
bits = description.split('\n')
formatted_bits = [textwrap.fill(bit,
desc_width,
initial_indent = indent,
subsequent_indent = indent) for bit in bits]
result = "\n".join(formatted_bits) + "\n"
return result
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
self._short_opt_fmt = "%s"
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt) for sopt in option._short_opts]
            long_opts = [self._long_opt_fmt % (lopt, metavar) for lopt in option._long_opts]
    xticks = param.Parameter(default=None, doc="""
Ticks along x-axis specified as an integer, explicit list of
tick locations or bokeh Ticker object. If set to None default
bokeh ticking behavior is applied.""")
yticks = param.Parameter(default=None, doc="""
Ticks along y-axis specified as an integer, explicit list of
tick locations or bokeh Ticker object. If set to None
default bokeh ticking behavior is applied.""")
# A dictionary mapping of the plot methods used to draw the
# glyphs corresponding to the ElementPlot, can support two
# keyword arguments a 'single' implementation to draw an individual
# plot and a 'batched' method to draw multiple Elements at once
_plot_methods = {}
# Declares the options that are propagated from sub-elements of the
# plot, mostly useful for inheriting options from individual
# Elements on an OverlayPlot. Enabled by default in v1.7.
_propagate_options = []
v17_option_propagation = True
def __init__(self, element, keys=None, ranges=None, dimensions=None,
batched=False, overlaid=0, cyclic_index=0, zorder=0, style=None,
overlay_dims={}, stream_sources=[], streams=None, **params):
self.zorder = zorder
self.cyclic_index = cyclic_index
self.overlaid = overlaid
self.batched = batched
self.overlay_dims = overlay_dims
if not isinstance(element, (HoloMap, DynamicMap)):
self.hmap = HoloMap(initial_items=(0, element),
kdims=['Frame'], id=element.id)
else:
self.hmap = element
if overlaid:
self.stream_sources = stream_sources
else:
self.stream_sources = compute_overlayable_zorders(self.hmap)
plot_element = self.hmap.last
if self.batched and not isinstance(self, GenericOverlayPlot):
plot_element = plot_element.last
dynamic = isinstance(element, DynamicMap) and not element.unbounded
self.top_level = keys is None
if self.top_level:
dimensions = self.hmap.kdims
keys = list(self.hmap.data.keys())
self.style = self.lookup_options(plot_element, 'style') if style is None else style
plot_opts = self.lookup_options(plot_element, 'plot').options
if self.v17_option_propagation:
inherited = self._traverse_options(plot_element, 'plot',
self._propagate_options,
defaults=False)
plot_opts.update(**{k: v[0] for k, v in inherited.items()})
super(GenericElementPlot, self).__init__(keys=keys, dimensions=dimensions,
dynamic=dynamic,
**dict(params, **plot_opts))
self.streams = get_nested_streams(self.hmap) if streams is None else streams
if self.top_level:
self.comm = self.init_comm()
self.traverse(lambda x: setattr(x, 'comm', self.comm))
# Attach streams if not overlaid and not a batched ElementPlot
if not (self.overlaid or (self.batched and not isinstance(self, GenericOverlayPlot))):
attach_streams(self, self.hmap)
# Update plot and style options for batched plots
if self.batched:
self.ordering = util.layer_sort(self.hmap)
overlay_opts = self.lookup_options(self.hmap.last, 'plot').options.items()
opts = {k: v for k, v in overlay_opts if k in self.params()}
self.set_param(**opts)
self.style = self.lookup_options(plot_element, 'style').max_cycles(len(self.ordering))
else:
self.ordering = []
def get_zorder(self, overlay, key, el):
"""
Computes the z-order of element in the NdOverlay
taking into account possible batching of elements.
"""
spec = util.get_overlay_spec(overlay, key, el)
return self.ordering.index(spec)
def _updated_zorders(self, overlay):
specs = [util.get_overlay_spec(overlay, key, el)
for key, el in overlay.data.items()]
self.ordering = sorted(set(self.ordering+specs))
return [self.ordering.index(spec) for spec in specs]
def _get_frame(self, key):
if isinstance(self.hmap, DynamicMap) and self.overlaid and self.current_frame:
self.current_key = key
return self.current_frame
elif key == self.current_key and not self._force:
return self.current_frame
cached = self.current_key is None
key_map = dict(zip([d.name for d in self.dimensions], key))
frame = get_plot_frame(self.hmap, key_map, cached)
traverse_setter(self, '_force', False)
if not key in self.keys and self.dynamic:
self.keys.append(key)
self.current_frame = frame
self.current_key = key
return frame
def _execute_hooks(self, element):
"""
Executes finalize hooks
"""
for hook in self.finalize_hooks:
try:
hook(self, element)
except Exception as e:
self.warning("Plotting hook %r could not be applied:\n\n %s" % (hook, e))
def get_extents(self, view, ranges):
"""
Gets the extents for the axes from the current View. The globally
computed ranges can optionally override the extents.
"""
ndims = len(view.dimensions())
num = 6 if self.projection == '3d' else 4
if self.apply_ranges:
if ranges:
dims = view.dimensions()
x0, x1 = ranges[dims[0].name]
if ndims > 1:
y0, y1 = ranges[dims[1].name]
else:
y0, y1 = (np.NaN, np.NaN)
if self.projection == '3d':
if len(dims) > 2:
z0, z1 = ranges[dims[2].name]
else:
z0, z1 = np.NaN, np.NaN
else:
x0, x1 = view.range(0)
y0, y1 = view.range(1) if ndims > 1 else (np.NaN, np.NaN)
if self.projection == '3d':
z0, z1 = view.range(2)
if self.projection == '3d':
range_extents = (x0, y0, z0, x1, y1, z1)
else:
range_extents = (x0, y0, x1, y1)
else:
range_extents = (np.NaN,) * num
if self.apply_extents:
norm_opts = self.lookup_options(view, 'norm').options
if norm_opts.get('framewise', False) or self.dynamic:
extents = view.extents
else:
extent_list = self.hmap.traverse(lambda x: x.extents, [Element])
extents = util.max_extents(extent_list, self.projection == '3d')
else:
extents = (np.NaN,) * num
if getattr(self, 'shared_axes', False) and self.subplot:
return util.max_extents([range_extents, extents], self.projection == '3d')
else:
max_extent = []
for l1, l2 in zip(range_extents, extents):
if isfinite(l2):
max_extent.append(l2)
else:
max_extent.append(l1)
return tuple(max_extent)
def _get_axis_labels(self, dimensions, xlabel=None, ylabel=None, zlabel=None):
if dimensions and xlabel is None:
xlabel = dim_axis_label(dimensions[0]) if dimensions[0] else ''
if len(dimensions) >= 2 and ylabel is None:
ylabel = dim_axis_label(dimensions[1]) if dimensions[1] else ''
if self.projection == '3d' and len(dimensions) >= 3 and zlabel is None:
zlabel = dim_axis_label(dimensions[2]) if dimensions[2] else ''
return xlabel, ylabel, zlabel
def _format_title(self, key, dimensions=True, separator='\n'):
frame = self._get_frame(key)
if frame is None: return None
type_name = type(frame).__name__
group = frame.group if frame.group != type_name else ''
label = frame.label
if self.layout_dimensions:
dim_title = self._frame_title(key, separator=separator)
title = dim_title
else:
if dimensions:
dim_title = self._frame_title(key, separator=separator)
else:
dim_title = ''
title_format = util.bytes_to_unicode(self.title_format)
title = title_format.format(label=util.bytes_to_unicode(label),
group=util.bytes_to_unicode(group),
type=type_name,
dimensions=dim_title)
return title.strip(' \n')
def update_frame(self, key, ranges=None):
"""
Set the plot(s) to the given frame number. Operates by
manipulating the matplotlib objects held in the self._handles
dictionary.
If n is greater than the number of available frames, update
using the last available frame.
"""
class GenericOverlayPlot(GenericElementPlot):
"""
Plotting baseclass to render (Nd)Overlay objects. It implements
methods to handle the creation of ElementPlots, coordinating style
groupings and zorder for all layers across a HoloMap. It also
allows collapsing of layers via the Compositor.
"""
batched = param.Boolean(default=True, doc="""
Whether to plot Elements NdOverlay in a batched plotting call
if possible. Disables legends and zorder may not be preserved.""")
legend_limit = param.Integer(default=25, doc="""
Number of rendered glyphs before legends are disabled.""")
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
style_grouping = param.Integer(default=2, doc="""
The length of the type.group.label spec that will be used to
group Elements into style groups. A style_grouping value of
1 will group just by type, a value of 2 will group by type and
group, and a value of 3 will group by the full specification.""")
_passed_handles = []
def __init__(self, overlay, ranges=None, batched=True, keys=None, group_counter=None, **params):
if 'projection' not in params:
params['projection'] = self._get_projection(overlay)
super(GenericOverlayPlot, self).__init__(overlay, ranges=ranges, keys=keys,
batched=batched, **params)
# Apply data collapse
self.hmap = self._apply_compositor(self.hmap, ranges, self.keys)
self.map_lengths = Counter()
self.group_counter = Counter() if group_counter is None else group_counter
self.zoffset = 0
self.subplots = self._create_subplots(ranges)
self.traverse(lambda x: setattr(x, 'comm', self.comm))
self.top_level = keys is None
if self.top_level:
self.comm = self.init_comm()
self.traverse(lambda x: setattr(x, 'comm', self.comm))
self.traverse(lambda x: attach_streams(self, x.hmap, 2),
[GenericElementPlot])
def _apply_compositor(self, holomap, ranges=None, keys=None, dimensions=None):
"""
Given a HoloMap compute the appropriate (mapwise or framewise)
ranges in order to apply the Compositor collapse operations in
display mode (data collapse should already have happened).
"""
# Compute framewise normalization
defaultdim = holomap.ndims == 1 and holomap.kdims[0].name != 'Frame'
if keys and ranges and dimensions and not defaultdim:
dim_inds = [dimensions.index(d) for d in holomap.kdims]
sliced_keys = [tuple(k[i] for i in dim_inds) for k in keys]
frame_ranges = OrderedDict([(slckey, self.compute_ranges(holomap, key, ranges[key]))
for key, slckey in zip(keys, sliced_keys) if slckey in holomap.data.keys()])
else:
mapwise_ranges = self.compute_ranges(holomap, None, None)
frame_ranges = OrderedDict([(key, self.compute_ranges(holomap, key, mapwise_ranges))
for key in holomap.data.keys()])
ranges = frame_ranges.values()
return Compositor.collapse(holomap, (ranges, frame_ranges.keys()), mode='display')
def _create_subplots(self, ranges):
# Check if plot should be batched
ordering = util.layer_sort(self.hmap)
batched = self.batched and type(self.hmap.last) is NdOverlay
if batched:
backend = self.renderer.backend
batchedplot = Store.registry[backend].get(self.hmap.last.type)
if (batched and batchedplot and 'batched' in batchedplot._plot_methods and
(not self.show_legend or len(ordering) > self.legend_limit)):
self.batched = True
keys, vmaps = [()], [self.hmap]
else:
self.batched = False
keys, vmaps = self.hmap.split_overlays()
if isinstance(self.hmap, DynamicMap):
dmap_streams = [get_nested_streams(layer) for layer in
split_dmap_overlay(self.hmap)]
else:
dmap_streams = [None]*len(keys)
# Compute global ordering
length = self.style_grouping
group_fn = lambda x: (x.type.__name__, x.last.group, x.last.label)
for m in vmaps:
self.map_lengths[group_fn(m)[:length]] += 1
subplots = OrderedDict()
for (key, vmap, streams) in zip(keys, vmaps, dmap_streams):
subplot = self._create_subplot(key, vmap, streams, ranges)
if subplot is None:
continue
            if
import pytest
from adlib27.autodiff import AutoDiff as AD
import numpy as np
# Testing the getters and setters
def test_getters():
x = AD(val=[10])
value = x.val
derivative = x.der
assert value == pytest.approx([10], rel=1e-4)
for d in derivative:
assert d == pytest.approx([1])
def test_setters():
x = AD(val=[10])
x.val[0] = 5
x.der[0][0] = 0
assert x.val == pytest.approx([5])
for d in x.der:
assert d == pytest.approx([0])
# Testing the comparison operations
def test_ne_const():
x = AD(val=[10])
y = 10
assert x != y
def test_ne_different_AD():
x = AD(val=[10], index=0, magnitude=2)
y = AD(val=[20], index=1, magnitude=2)
assert x != y
def test_eq_AD():
x1 = AD(val=[10])
x2 = AD(val=[10])
assert x1 == x2
def test_ne_different_AD_many_val():
x = AD(val=[10, -1, 3.2, 4], index=0, magnitude=2)
y = AD(val=[-2, 0, 1, 100], index=1, magnitude=2)
assert x != y
def test_eq_AD_many_val():
x1 = AD(val=[10, 1, 3, 4])
x2 = AD(val=[10, 1, 3, 4])
assert x1 == x2
# Testing the Unary operations
def test_neg():
x = AD(val=[10])
y = -x
assert y.val == pytest.approx([-10])
for d in y.der:
assert d == pytest.approx([-1])
def test_neg_different_AD():
x = AD(val=[10], index=0, magnitude=2)
y = AD(val=[20], index=1, magnitude=2)
z = -(x + y)
assert z.val == pytest.approx([-30])
for d in z.der:
assert d == pytest.approx([-1])
def test_neg_many_val():
x = AD(val=[10, 1, 3, 4])
y = -x
assert y.val == pytest.approx([-10, -1, -3, -4])
for d in y.der:
assert d == pytest.approx([-1, -1, -1, -1])
def test_neg_different_AD_many_val():
x = AD(val=[10, -1, 3.2, 4], index=0, magnitude=2)
y = AD(val=[-2, 0, 1, 100], index=1, magnitude=2)
z = -(x + y)
assert z.val == pytest.approx([-8, 1, -4.2, -104])
for d in z.der:
assert d == pytest.approx([-1, -1, -1, -1])
def test_pos():
x = AD(val=[-15])
y = +x
assert y.val == pytest.approx([-15])
for d in y.der:
assert d == pytest.approx([1])
def test_pos_different_AD():
x = AD(val=[10], index=0, magnitude=2)
y = AD(val=[-20], index=1, magnitude=2)
z = +(x + y)
assert z.val == pytest.approx([-10])
for d in z.der:
assert d == pytest.approx([1])
def test_pos_many_val():
x = AD(val=[10, 1, 3, 4])
y = +x
assert y.val == pytest.approx([10, 1, 3, 4])
for d in y.der:
assert d == pytest.approx([1, 1, 1, 1])
def test_pos_different_AD_many_val():
x = AD(val=[10, -1, 3.2, 4], index=0, magnitude=2)
y = AD(val=[-2, 0, 1, 100], index=1, magnitude=2)
z = +(x + y)
assert z.val == pytest.approx([8, -1, 4.2, 104])
for d in z.der:
assert d == pytest.approx([1, 1, 1, 1])
# Testing the basic operations (+, -, *, /)
# Testing the add and radd
def test_add_const():
x = AD(val=[10])
y = x + 22
assert y.val == pytest.approx([32])
for d in y.der:
assert d == pytest.approx([1])
def test_add_const_many_val():
x = AD(val=[-1, 0, 5, 10])
y = x + 17
assert y.val == pytest.approx([16, 17, 22, 27])
for d in y.der:
assert d == pytest.approx([1, 1, 1, 1])
def test_add_AD():
x = AD(val=[3])
y = x + x
assert y.val == pytest.approx([6])
for d in y.der:
assert d == pytest.approx([2])
def test_add_AD_many_val():
x = AD(val=[-1, 0, 5, 10])
y = x + x
assert y.val == pytest.approx([-2, 0, 10, 20])
for d in y.der:
assert d == pytest.approx([2, 2, 2, 2])
def test_add_different_AD():
x = AD(val=[3], index=0, magnitude=2)
y = AD(val=[4], index=1, magnitude=2)
z = x + y
assert z.val == pytest.approx([7])
for d in z.der:
assert d == pytest.approx([1])
def test_add_different_AD_many_val():
x = AD(val=[10, -1, 3.2, 4], index=0, magnitude=2)
y = AD(val=[-2, 0, 1, 100], index=1, magnitude=2)
z = x + y
assert z.val == pytest.approx([8, -1, 4.2, 104])
for d in z.der:
assert d == pytest.approx([1, 1, 1, 1])
def test_radd_const():
x = AD(val=[10])
y = 5 + x
assert y.val == pytest.approx([15])
for d in y.der:
assert d == pytest.approx([1])
def test_radd_const_many_val():
x = AD(val=[-1, 0, 5, 10])
y = 17 + x
assert y.val == pytest.approx([16, 17, 22, 27])
for d in y.der:
assert d == pytest.approx([1, 1, 1, 1])
# Testing the sub and rsub
def test_sub_const():
x = AD(val=[10])
y = x - 3
assert y.val == pytest.approx([7])
for d in y.der:
assert d == pytest.approx([1])
def test_sub_const_many_val():
x = AD(val=[-1, 0, 5, 10])
y = x - 3
assert y.val == pytest.approx([-4, -3, 2, 7])
for d in y.der:
assert d == pytest.approx([1, 1, 1, 1])
def test_sub_AD():
x = AD(val=[14])
y = x - x
assert y.val == pytest.approx([0])
for d in y.der:
assert d == pytest.approx([0])
def test_sub_AD_many_val():
x = AD(val=[-1, 0, 5, 10])
y = x - x
assert y.val == pytest.approx([0, 0, 0, 0])
for d in y.der:
assert d == pytest.approx([0, 0, 0, 0])
def test_sub_different_AD():
x = AD(val=[3], index=0, magnitude=2)
y = AD(val=[4], index=1, magnitude=2)
z = x - y
assert z.val == pytest.approx([-1])
assert z.der[0] == pytest.approx([1])
assert z.der[1] == pytest.approx([-1])
def test_sub_different_AD_many_val():
x = AD(val=[10, -1, 3.2, 4], index=0, magnitude=2)
y = AD(val=[-2, 0, 1, 100], index=1, magnitude=2)
z = x - y
assert z.val == pytest.approx([12, -1, 2.2, -96])
assert z.der[0] == pytest.approx([1, 1, 1, 1])
assert z.der[1] == pytest.approx([-1, -1, -1, -1])
def test_rsub_const():
x = AD(val=[1])
y = 7 - x
assert y.val == pytest.approx([6])
for d in y.der:
assert d == pytest.approx([-1])
def test_rsub_const_many_val():
x = AD(val=[-1, 0, 5, 10])
y = 17 - x
assert y.val == pytest.approx([18, 17, 12, 7])
for d in y.der:
assert d == pytest.approx([-1, -1, -1, -1])
# Testing the mul and rmul
def test_mul_const():
x = AD(val=[10])
y = x * 3
assert y.val == pytest.approx([30])
for d in y.der:
assert d == pytest.approx([3])
def test_mul_const_many_val():
x = AD(val=[-1, 0, 5, 10])
y = x * 3
assert y.val == pytest.approx([-3, 0, 15, 30])
for d in y.der:
assert d == pytest.approx([3, 3, 3, 3])
def test_mul_AD():
x = AD(val=[4])
y = x * x
assert y.val == pytest.approx([16])
for d in y.der:
assert d == pytest.approx([8])
def test_mul_AD_many_val():
x = AD(val=[-1, 0, 5, 10])
y = x * x
assert y.val == pytest.approx([1, 0, 25, 100])
for d in y.der:
assert d == pytest.approx([-2, 0, 10, 20])
def test_mul_different_AD():
x = AD(val=[3], index=0, magnitude=2)
y = AD(val=[4], index=1, magnitude=2)
z = x * y
assert z.val == pytest.approx([12])
assert z.der[0] == pytest.approx([4])
assert z.der[1] == pytest.approx([3])
def test_mul_different_AD_many_val():
x = AD(val=[10, -1, 3.2, 4], index=0, magnitude=2)
y = AD(val=[-2, 0, 1, 100], index=1, magnitude=2)
z = x * y
assert z.val == pytest.approx([-20, 0, 3.2, 400])
assert z.der[0] == pytest.approx([-2, 0, 1, 100])
assert z.der[1] == pytest.approx([10, -1, 3.2, 4])
def test_rmul_const():
x = AD(val=[1])
y = 7 * x
assert y.val == pytest.approx([7])
for d in y.der:
assert d == pytest.approx([7])
def test_rmul_const_many_val():
x = AD(val=[-1, 0, 5, 10])
y = 7 * x
assert y.val == pytest.approx([-7, 0, 35, 70])
for d in y.der:
assert d == pytest.approx([7, 7, 7, 7])
# Testing the div and rdiv
def test_div_const():
x = AD(val=[20])
y = x / 4
assert y.val == pytest.approx([5])
for d in y.der:
assert d == pytest.approx([0.25])
def test_div_const_many_val():
x = AD(val=[-1, 0, 5, 10])
y = x / 4
assert y.val == pytest.approx([-0.25, 0, 1.25, 2.5])
for d in y.der:
assert d == pytest.approx([0.25, 0.25, 0.25, 0.25])
def test_div_AD():
x = AD(val=[4])
y = x / x
assert y.val == pytest.approx([1])
for d in y.der:
assert d == pytest.approx([0])
def test_div_AD_many_val():
x = AD(val=[-1, 1, 5, 10])
y = x / x
assert y.val == pytest.approx([1, 1, 1, 1])
for d in y.der:
assert d == pytest.approx([0, 0, 0, 0])
def test_div_different_AD():
x = AD(val=[2], index=0, magnitude=2)
y = AD(val=[4], index=1, magnitude=2)
z = x / y
assert z.val == pytest.approx([0.5])
assert z.der[0] == pytest.approx([0.25])
assert z.der[1] == pytest.approx([-0.125])
def test_div_different_AD_many_val():
x = AD(val=[-2, 4,10, 100], index=0, magnitude=2)
y = AD(val=[1, 2, 2, 4], index=1, magnitude=2)
z = x / y
assert z.val == pytest.approx([-2, 2, 5, 25])
assert z.der[0] == pytest.approx([1, 0.5, 0.5, 0.25])
assert z.der[1] == pytest.approx([2, -1, -2.5, -6.25])
def test_rdiv_const():
x = AD(val=[1])
y = 7 / x
assert y.val == pytest.approx([7])
for d in y.der:
assert d == pytest.approx([-7])
def test_rdiv_const_many_val():
x = AD(val=[-1, 1, 7, 14])
y = 7 / x
    assert y.val == pytest.approx([-7, 7, 1, 0.5])
        response = {'artist': '', 'album': '', 'title': '', 'genre': ''}
logging.debug("Getting metadata")
args = list(GLOBAL_SETTINGS['ffmpeg-flags'])
args.extend(['-i', path, '-f', 'ffmetadata', '-'])
process = subprocess.Popen(args, stdout=subprocess.PIPE)
output = process.communicate()
data = output[0].splitlines()
data.sort()
for l in data:
info = l.decode().split('=')
if len(info) > 1:
response[info[0]] = info[1]
# get track length
args = list(GLOBAL_SETTINGS['ffprobe-flags'])
args.append(path)
process = subprocess.Popen(args, stdout=subprocess.PIPE)
output = process.communicate()
response['length'] = output[0].decode().strip()
response['size'] = os.path.getsize(path)
return response
def get_audio_metadata(self, identifier):
response = {'artist': '', 'album': '', 'title': '', 'genre': ''}
path = self.get_file_path(identifier)
if path is None: return response
return self.get_file_metadata(path)
def search_media(self, key):
key = key.lower()
response = {}
for k, value in self.mapping.items():
if not value['directory'] and key in value['name'].lower():
response['{}'.format(value['id'])] = 1
# response['results'].append(k)
return response
def is_transcoding(self, id):
return self.transcodeProcess[id].poll()
def transcode_audio(self, path, quality=None, fmt=None):
if fmt is None:
fmt = GLOBAL_SETTINGS['stream-format']
if quality is None or quality.lower() not in STREAM_QUALITY['{}'.format(fmt)]:
selections = STREAM_QUALITY[
"{}".format(GLOBAL_SETTINGS['stream-format'])]
quality = selections[len(selections) // 2]
#check if audio has already been previously transcoded
if len(TRANSCODE_CACHE) > 0:
for c in TRANSCODE_CACHE:
if c['infile'] == path:
logging.debug("FOUND CACHE OBJ: ")
logging.debug(c)
return (c['outfile'], c['proc'])
self.transcodeID = (self.transcodeID +
1) % GLOBAL_SETTINGS['max-transcodes']
proc = self.transcodeProcess[self.transcodeID]
try:
if proc is not None and proc.poll() and os.getpgid(proc.pid):
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
except ProcessLookupError:
logging.debug("Process: " + str(proc.pid) + " no longer exists....")
ext = os.path.splitext(path)
outfile = os.path.join(GLOBAL_SETTINGS["cache-dir"], "transcoded{}.audio".format(self.transcodeID))
#delete old cache file before transcoding new one
try:
tfh = open(outfile, 'rb')
tfh.close()
os.unlink(outfile)
except:
pass
args = list(GLOBAL_SETTINGS['ffmpeg-flags'])
args.extend(TRANSCODE_CMD['{}'.format(fmt)])
args[args.index("{infile}")] = path
args[args.index("{quality}")] = quality
args[args.index("{outfile}")] = outfile
logging.debug(args)
self.transcodeProcess[self.transcodeID] = subprocess.Popen(args)
cacheobj = {
'infile': path,
'outfile': outfile,
'proc': self.transcodeProcess[self.transcodeID],
'fmt': fmt,
'quality': quality
}
if len(TRANSCODE_CACHE) < GLOBAL_SETTINGS['max-transcodes']:
TRANSCODE_CACHE.append(cacheobj)
else:
TRANSCODE_CACHE[self.transcodeID] = cacheobj
return (outfile, self.transcodeProcess[self.transcodeID])
def isalbum_art_cached(self):
return os.path.exists(self.art_cache_path)
def clear_album_cache(self):
if self.isalbum_art_cached():
os.unlink(self.art_cache_path)
def extract_album_art(self, filepath):
self.clear_album_cache()
args = list(GLOBAL_SETTINGS['ffmpeg-flags'])
outfile = self.art_cache_path
args.extend(COVERART_CMD)
args[args.index("{infile}")] = filepath
args[args.index("{outfile}")] = outfile
logging.debug(args)
coverProc = subprocess.Popen(args)
res = coverProc.communicate()
code = coverProc.returncode
if not self.isalbum_art_cached():
code = -1
return outfile, code
def cache_album_art(self, audiopath, covername):
self.clear_album_cache()
basepath = os.path.dirname(audiopath)
outfile = self.art_cache_path
shutil.copy2(os.path.join(basepath, covername), outfile)
return outfile, 0
def save_rescan_diff(self, filehash, deleted):
self.listDiffs.append(ListHistory(int(time.time()), filehash, deleted))
def latest_rescan_diff(self):
if len(self.listDiffs) < 1: return 0
return self.listDiffs[-1].date
def get_rescan_diffs(self, lastUpdate):
#return a list of all diffs after last update
diffList = []
for diff in self.listDiffs:
if diff.date > lastUpdate:
diffList.append(diff)
return diffList
'''==================================================
Program Entry
=================================================='''
class Startup:
def args(self):
# get port number
try:
idx = sys.argv.index('-p')
if idx + 1 < len(sys.argv):
GLOBAL_SETTINGS['server-port'] = sys.argv[idx + 1]
else:
logging.error("Missing port value!")
exit(1)
except:
logging.info("Using default port: {}".format(
GLOBAL_SETTINGS['server-port']))
try:
idx = sys.argv.index('-password')
if idx + 1 < len(sys.argv):
GLOBAL_SETTINGS['password'] = sys.argv[idx + 1]
else:
logging.error("Missing password value!")
exit(1)
except:
GLOBAL_SETTINGS['password'] = GLOBAL_SETTINGS['default-password']
logging.info("Using default password: {}".format(GLOBAL_SETTINGS['password']))
GLOBAL_SETTINGS['music-dir'] = sys.argv[-1]
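# Illustrative invocation for the flags parsed above (script name and path are
# hypothetical): python server.py -p 8080 -password secret /home/user/Music
# The music directory is always read from the last positional argument.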
def envvars(self):
GLOBAL_SETTINGS['server-port'] = int(os.environ.get('RMP_PORT')) if os.environ.get('RMP_PORT') else GLOBAL_SETTINGS['server-port']
print("PORT: " + str(GLOBAL_SETTINGS['server-port']))
GLOBAL_SETTINGS['password'] = os.environ.get('RMP_PASSWORD') if os.environ.get('RMP_PASSWORD') else GLOBAL_SETTINGS['password']
print("Password: " + GLOBAL_SETTINGS['password'])
GLOBAL_SETTINGS['music-dir'] = os.environ.get('RMP_MUSIC_DIR') if os.environ.get('RMP_MUSIC_DIR') else GLOBAL_SETTINGS['music-dir']
print("Music: " + GLOBAL_SETTINGS['music-dir'])
def setup(self):
self.args()
self.envvars()
GLOBAL_SETTINGS['MPlayerClass'] = MPlayer()
GLOBAL_SETTINGS['MusicListClass'] = MusicList(GLOBAL_SETTINGS['music-dir'])
GLOBAL_SETTINGS['running-dir'] = os.path.dirname(os.path.realpath(__file__))
GLOBAL_SETTINGS['auth-token'] = str(uuid.uuid4())
try:
os.stat(GLOBAL_SETTINGS["cache-dir"])
except:
os.mkdir(GLOBAL_SETTINGS["cache-dir"])
def run(self):
app.run(host='0.0.0.0', threaded=True, port=GLOBAL_SETTINGS['server-port'])
system = Startup()
system.setup()
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
app.config['JSON_AS_ASCII'] = False
CORS(app)
Compress(app)
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
def authMiddleware():
resp = {"status": 401}
token = request.args.get('token')
if token is not None:
resp["status"] = 200 if token == GLOBAL_SETTINGS['auth-token'] else resp["status"]
return resp
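# Clients send the token generated at startup as a query parameter, e.g. a
# hypothetical request: GET /api/files?token=<auth-token>
# Any missing or mismatched token keeps the 401 status set above.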
def play_file(file, offset):
GLOBAL_SETTINGS['MusicListClass'].currentFile = file
path = GLOBAL_SETTINGS['MusicListClass'].get_file_path(file['id'])
GLOBAL_SETTINGS['MPlayerClass'].play(path, offset)
def ytdl_hook(d):
if d['status'] == 'finished':
pass
'''==================================================
Routes
=================================================='''
@app.route('/api/commands/pause', methods=['POST'])
def pause():
GLOBAL_SETTINGS['MPlayerClass'].pause()
return '', 200
@app.route('/api/commands/stop', methods=['POST'])
def stop():
GLOBAL_SETTINGS['MPlayerClass'].stop()
return '', 200
@app.route('/api/commands/info', methods=['POST'])
def get_info():
resp = authMiddleware()
if resp['status'] == 200:
resp = GLOBAL_SETTINGS['MPlayerClass'].get_playing_track_info()
return jsonify(**resp)
@app.route('/api/commands/formats')
def get_quality():
resp = authMiddleware()
if resp['status'] == 200:
resp = {
'format': STREAM_FORMAT,
'quality': STREAM_QUALITY
}
return jsonify(**resp)
@app.route('/api/commands/rescan')
def rescanner():
resp = authMiddleware()
if resp['status'] != 200:
return jsonify(**resp)
lastUpdate = request.args.get('lastUpdate')
if lastUpdate is None:
lastUpdate = 0
else:
lastUpdate = int(lastUpdate)
root_dir = GLOBAL_SETTINGS['MusicListClass'].root
updated = GLOBAL_SETTINGS['MusicListClass'].latest_rescan_diff()
resp = {'more': False, 'time': updated, 'added': [], 'removed': []}
if lastUpdate >= updated:
#if the last update time matches both the client and the server
#check for new files on the server to push
#otherwise, we just need to sync the client up with the server
oldHash = GLOBAL_SETTINGS['MusicListClass'].fileHash
RescanHash = FileHashNodeTree(root_dir)
RescanHash.scan_directory(root_dir, '.', '.', oldHash)
RescanHash.resolve_scan_diff(root_dir, '.', '.', oldHash)
#merge the new files added back into the original file tree
resp['added'] = RescanHash.get_files()
resp['removed'] = oldHash.merge_scan_diff(RescanHash)
GLOBAL_SETTINGS['MusicListClass'].save_rescan_diff(RescanHash, resp['removed'])
resp['time'] = GLOBAL_SETTINGS['MusicListClass'].latest_rescan_diff()
else:
diffsList = GLOBAL_SETTINGS['MusicListClass'].get_rescan_diffs(lastUpdate)
combinedDiffs = diffsList.pop(0)
resp['removed'] = combinedDiffs.deleted
resp['time'] = combinedDiffs.date
resp['more'] = resp['time'] <= updated
resp['added'] = combinedDiffs.filehashnode.get_files()
return jsonify(**resp)
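# Illustrative response shape for this endpoint (values are hypothetical):
# {"more": false, "time": 1627654321, "added": [...], "removed": [...]}
# A client can keep polling with lastUpdate=<time> until "more" is false.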
@app.route('/api/files')
def files():
resp = authMiddleware()
if resp['status'] != 200:
return jsonify(**resp)
obj = {
'root' : GLOBAL_SETTINGS['music-dir'],
'files': GLOBAL_SETTINGS['MusicListClass'].fileHash.get_files(),
'count': len(GLOBAL_SETTINGS['MusicListClass'].mapping.keys())
}
return jsonify(**obj)
@app.route('/api/files/search/<string:keyword>')
def search(keyword):
resp = authMiddleware()
if resp['status'] != 200:
return jsonify(**resp)
keyword = keyword.strip()
if len(keyword) <= 0:
return '', 400
return jsonify(**GLOBAL_SETTINGS["MusicListClass"].search_media(keyword))
@app.route('/api/files/<string:identifier>')
def file(identifier):
resp = authMiddleware()
if resp['status'] != 200:
return jsonify(**resp)
file = GLOBAL_SETTINGS['MusicListClass'].get_file(identifier)
if not file:
return '', 400
return jsonify(**file)
@app.route('/api/files/<string:identifier>/cover')
@app.route('/api/files/<string:identifier>/cover/<string:covername>')
def get_cover(identifier, covername=None):
resp = authMiddleware()
if resp['status'] != 200:
return jsonify(**resp)
filepath = GLOBAL_SETTINGS['MusicListClass'].get_file_path(identifier)
if filepath is None: return '', 400
elif covername is not None:
path, code = GLOBAL_SETTINGS["MusicListClass"].cache_album_art(filepath, covername)
response = {
'code': code,
'path': path
}
return jsonify(**response)
else:
path, code = GLOBAL_SETTINGS['MusicListClass'].extract_album_art(filepath)
response = {
'code': code,
'path': path
}
return jsonify(**response)
@app.route('/api/files/<string:identifier>/play')
def play(identifier):
resp = authMiddleware()
if resp['status'] != 200:
return jsonify(**resp)
offset = request.args.get('offset')
file = GLOBAL_SETTINGS['MusicListClass'].get_file(identifier)
if not file:
return '', 400
play_file(file, offset)
return '', 200
@app.route('/api/files/<string:identifier>/data')
def metadata(identifier):
resp = authMiddleware()
if resp['status'] != 200:
return jsonify(**resp)
data = GLOBAL_SETTINGS['MusicListClass'].get_audio_metadata(identifier)
return jsonify(**data)
@app.route('/api/files/<string:identifier>/stream')
def streamAudio(identifier):
resp = authMiddleware()
if resp['status'] != 200:
return jsonify(**resp)
filename = GLOBAL_SETTINGS['MusicListClass'].get_file_path(identifier)
if not filename:
return '', 400
destType = request.args.get('format')
if destType is not None:
destType = destType.lower()
if destType not in STREAM_FORMAT:
destType = GLOBAL_SETTINGS['stream-format']
else:
destType = GLOBAL_SETTINGS['stream-format']
# allow the user to force transcoding of all audio regardless of whether it is
# already in a supported format
doTranscode = request.args.get('transcode')
if doTranscode is not None:
doTranscode = (doTranscode.lower() == 'true')
else:
doTranscode = False
# allow user to adjust quality of streaming
quality = request.args.get('quality')
newFile = '{}'.format(filename)
ext = os.path.splitext(filename)[1].lower()[1:]
if ext in TRANSCODE_FROM or doTranscode:
data = GLOBAL_SETTINGS['MusicListClass'].get_file_metadata(newFile)
guessTranscodedSize(destType, quality, data)
newFile, proc = GLOBAL_SETTINGS['MusicListClass'].transcode_audio(
filename, quality, destType)
headers, offset = makeRangeHeader(data)
# try opening the output file until it's successful
tfh = None
while tfh is None:
try:
tfh = open(newFile, 'rb')
except:
# give ffmpeg some time to start transcoding
time.sleep(1)
tfh.close()
@stream_with_context
def generate(inFile, ffmpegProc, pos):
file = open(inFile, 'rb')
if pos > 0: file.seek(pos, 0)
doneTranscode = False
while True:
chunk = file.read(GLOBAL_SETTINGS["stream-chunk"])
if len(chunk) > 0:
yield chunk
# if no bytes were read, check if transcoding is still
# happening
doneTranscode = ffmpegProc.poll() is not None
if len(chunk) == 0 and doneTranscode:
break
file.close()
sendtype = AUDIO_MIMETYPES['{}'.format(destType)]
resp = Response(stream_with_context(generate(newFile, proc, offset)), mimetype=sendtype, headers=headers)
resp.status_code = 206
return resp
# no transcoding, just streaming if audio is already in a streamable format
elif ext in STREAM_FORMAT:
data = GLOBAL_SETTINGS['MusicListClass'].get_file_metadata(newFile)
headers, offset = makeRangeHeader(data)
def generate(inFile, pos):
file = open(inFile, 'rb')
if pos > 0 and pos < data['size']: file.seek(pos, 0)
elif pos >= data['size']:
file.close()
return
while True:
chunk = file.read(GLOBAL_SETTINGS["stream-chunk"])
if chunk:
yield chunk
else:
break
file.close()
sendtype = AUDIO_MIMETYPES['{}'.format(ext)]
resp = Response(stream_with_context(generate(newFile, offset)), mimetype=sendtype, headers=headers)
resp.status_code = 206
return resp
# for whatever isn't an audio file
return send_file(newFile)
@app.route('/api/youtube')
def youtube():
yturl = request.args.get('yturl')
resp = {"status": 400}
YOUTUBE_DL_OPTS['progress_hooks'] = [ytdl_hook]
with youtube_dl.YoutubeDL(YOUTUBE_DL_OPTS) as ydl:
ydl.download([yturl])
resp["status"] = 200
return jsonify(**resp)
@app.route('/<path:filename>')
def serving(filename):
if GLOBAL_SETTINGS['music-dir'] in filename:
resp = authMiddleware()
if resp['status'] != 200:
return jsonify(**resp)
# for whatever isn't an audio file
return send_file(filename)
@app.route('/')
def togui():
return redirect(url_for('index'))
@app.route('/gui')
def index():
doStream = bool(request.args.get('stream'))
return render_template('index.html', enableStream=doStream)
@app.route('/authenticate', methods=['POST'])
def authenticate():
# 0.002215 seconds
timeseries = sorted(timeseries, key=lambda x: x[0])
if debug_logging:
current_logger.debug('debug :: %s :: time series of length - %s' % (
algorithm_name, str(len(timeseries))))
# Testing the data to ensure it meets minimum requirements, in the case
# of Skyline's use of the m66 algorithm this means that:
# - the time series must have at least 75% of its full_duration
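# Worked example (assuming FULL_DURATION is 86400 seconds, i.e. 24 hours):
# period_required = int(86400 * 0.75) = 64800, so a series spanning less than
# 18 hours of data is treated as insufficient.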
do_not_use_sparse_data = False
if current_skyline_app == 'luminosity':
do_not_use_sparse_data = True
if minimum_sparsity == 0:
do_not_use_sparse_data = False
total_period = 0
total_datapoints = 0
calculate_variables = False
if do_not_use_sparse_data:
calculate_variables = True
if determine_duration:
calculate_variables = True
if calculate_variables:
try:
start_timestamp = int(timeseries[0][0])
end_timestamp = int(timeseries[-1][0])
total_period = end_timestamp - start_timestamp
total_datapoints = len(timeseries)
except SystemExit as e:
if debug_logging:
current_logger.debug('debug_logging :: %s :: SystemExit called, exiting - %s' % (
algorithm_name, e))
if current_skyline_app == 'webapp':
return (anomalous, anomalyScore, anomalies, anomalies_dict)
if return_anomalies:
return (anomalous, anomalyScore, anomalies)
return (anomalous, anomalyScore)
except:
traceback_msg = traceback.format_exc()
record_algorithm_error(current_skyline_app, parent_pid, algorithm_name, traceback_msg)
if debug_logging:
current_logger.error(traceback_msg)
current_logger.error('error :: debug_logging :: %s :: failed to determine total_period and total_datapoints' % (
algorithm_name))
timeseries = []
if not timeseries:
if current_skyline_app == 'webapp':
return (anomalous, anomalyScore, anomalies, anomalies_dict)
if return_anomalies:
return (anomalous, anomalyScore, anomalies)
return (anomalous, anomalyScore)
if current_skyline_app == 'analyzer':
# Default for analyzer at required period to 18 hours
period_required = int(FULL_DURATION * 0.75)
else:
# Determine from timeseries
if total_period < FULL_DURATION:
period_required = int(FULL_DURATION * 0.75)
else:
period_required = int(total_period * 0.75)
if determine_duration:
period_required = int(total_period * 0.75)
if do_not_use_sparse_data:
# If the time series does not have 75% of its full_duration it does
# not have sufficient data to sample
try:
if total_period < period_required:
if debug_logging:
current_logger.debug('debug :: %s :: time series does not have sufficient data' % (
algorithm_name))
if current_skyline_app == 'webapp':
return (anomalous, anomalyScore, anomalies, anomalies_dict)
if return_anomalies:
return (anomalous, anomalyScore, anomalies)
return (anomalous, anomalyScore)
except SystemExit as e:
if debug_logging:
current_logger.debug('debug_logging :: %s :: SystemExit called, exiting - %s' % (
algorithm_name, e))
if current_skyline_app == 'webapp':
return (anomalous, anomalyScore, anomalies, anomalies_dict)
if return_anomalies:
return (anomalous, anomalyScore, anomalies)
return (anomalous, anomalyScore)
except:
traceback_msg = traceback.format_exc()
record_algorithm_error(current_skyline_app, parent_pid, algorithm_name, traceback_msg)
if debug_logging:
current_logger.error(traceback_msg)
current_logger.error('error :: debug_logging :: %s :: failed to determine if time series has sufficient data' % (
algorithm_name))
if current_skyline_app == 'webapp':
return (anomalous, anomalyScore, anomalies, anomalies_dict)
if return_anomalies:
return (anomalous, anomalyScore, anomalies)
return (anomalous, anomalyScore)
# If the time series does not have 75% of its full_duration
# datapoints it does not have sufficient data to sample
# Determine resolution from the last 30 data points
# INFO took 0.002060 seconds
if not resolution:
resolution_timestamps = []
metric_resolution = False
for metric_datapoint in timeseries[-30:]:
timestamp = int(metric_datapoint[0])
resolution_timestamps.append(timestamp)
timestamp_resolutions = []
if resolution_timestamps:
last_timestamp = None
for timestamp in resolution_timestamps:
if last_timestamp:
resolution = timestamp - last_timestamp
timestamp_resolutions.append(resolution)
last_timestamp = timestamp
else:
last_timestamp = timestamp
try:
del resolution_timestamps
except:
pass
if timestamp_resolutions:
try:
timestamp_resolutions_count = Counter(timestamp_resolutions)
ordered_timestamp_resolutions_count = timestamp_resolutions_count.most_common()
metric_resolution = int(ordered_timestamp_resolutions_count[0][0])
except SystemExit as e:
if debug_logging:
current_logger.debug('debug_logging :: %s :: SystemExit called, exiting - %s' % (
algorithm_name, e))
if current_skyline_app == 'webapp':
return (anomalous, anomalyScore, anomalies, anomalies_dict)
if return_anomalies:
return (anomalous, anomalyScore, anomalies)
return (anomalous, anomalyScore)
except:
traceback_msg = traceback.format_exc()
record_algorithm_error(current_skyline_app, parent_pid, algorithm_name, traceback_msg)
if debug_logging:
current_logger.error(traceback_msg)
current_logger.error('error :: debug_logging :: %s :: failed to determine if time series has sufficient data' % (
algorithm_name))
try:
del timestamp_resolutions
except:
pass
else:
metric_resolution = resolution
minimum_datapoints = None
if metric_resolution:
minimum_datapoints = int(period_required / metric_resolution)
if minimum_datapoints:
if total_datapoints < minimum_datapoints:
if debug_logging:
current_logger.debug('debug :: %s :: time series does not have sufficient data, minimum_datapoints required is %s and time series has %s' % (
algorithm_name, str(minimum_datapoints),
str(total_datapoints)))
if current_skyline_app == 'webapp':
return (anomalous, anomalyScore, anomalies, anomalies_dict)
if return_anomalies:
return (anomalous, anomalyScore, anomalies)
return (anomalous, anomalyScore)
# Is the time series fully populated?
# full_duration_datapoints = int(full_duration / metric_resolution)
total_period_datapoints = int(total_period / metric_resolution)
# minimum_percentage_sparsity = 95
minimum_percentage_sparsity = 90
sparsity = int(total_datapoints / (total_period_datapoints / 100))
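# Worked example of the sparsity calculation (hypothetical numbers): at a 60s
# resolution over an 86400s period, total_period_datapoints = 1440; with only
# 1224 datapoints present, sparsity = int(1224 / 14.4) = 85, which falls below
# the 90 threshold checked below.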
if sparsity < minimum_percentage_sparsity:
if debug_logging:
current_logger.debug('debug :: %s :: time series does not have sufficient data, minimum_percentage_sparsity required is %s and time series has %s' % (
algorithm_name, str(minimum_percentage_sparsity),
str(sparsity)))
if current_skyline_app == 'webapp':
return (anomalous, anomalyScore, anomalies, anomalies_dict)
if return_anomalies:
return (anomalous, anomalyScore, anomalies)
return (anomalous, anomalyScore)
if len(set(item[1] for item in timeseries)) == 1:
if debug_logging:
current_logger.debug('debug :: %s :: time series does not have sufficient variability, all the values are the same' % algorithm_name)
anomalous = False
anomalyScore = 0.0
if current_skyline_app == 'webapp':
return (anomalous, anomalyScore, anomalies, anomalies_dict)
if return_anomalies:
return (anomalous, anomalyScore, anomalies)
return (anomalous, anomalyScore)
end_preprocessing = timer()
preprocessing_runtime = end_preprocessing - start_preprocessing
if debug_logging:
current_logger.debug('debug :: %s :: preprocessing took %.6f seconds' % (
algorithm_name, preprocessing_runtime))
if not timeseries:
if debug_logging:
current_logger.debug('debug :: %s :: m66 not run as no data' % (
algorithm_name))
anomalies = []
if current_skyline_app == 'webapp':
return (anomalous, anomalyScore, anomalies, anomalies_dict)
if return_anomalies:
return (anomalous, anomalyScore, anomalies)
return (anomalous, anomalyScore)
if debug_logging:
current_logger.debug('debug :: %s :: timeseries length: %s' % (
algorithm_name, str(len(timeseries))))
anomalies_dict['timestamp'] = int(timeseries[-1][0])
anomalies_dict['from_timestamp'] = int(timeseries[0][0])
start_analysis = timer()
try:
# bottleneck is used because it is much faster
# pd dataframe method (1445 data point - 24hrs): took 0.077915 seconds
# bottleneck method (1445 data point - 24hrs): took 0.005692 seconds
# numpy and pandas rolling
# 2021-07-30 12:37:31 :: 2827897 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 136.93 seconds
# 2021-07-30 12:44:53 :: 2855884 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 148.82 seconds
# 2021-07-30 12:48:41 :: 2870822 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 145.62 seconds
# 2021-07-30 12:55:00 :: 2893634 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 139.00 seconds
# 2021-07-30 12:59:31 :: 2910443 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 144.80 seconds
# 2021-07-30 13:02:31 :: 2922928 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 143.35 seconds
# 2021-07-30 14:12:56 :: 3132457 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 129.25 seconds
# 2021-07-30 14:22:35 :: 3164370 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 125.72 seconds
# 2021-07-30 14:28:24 :: 3179687 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 222.43 seconds
# 2021-07-30 14:33:45 :: 3179687 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 244.00 seconds
# 2021-07-30 14:36:27 :: 3214047 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 141.10 seconds
# numpy and bottleneck
# 2021-07-30 16:41:52 :: 3585162 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 73.92 seconds
# 2021-07-30 16:46:46 :: 3585162 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 68.84 seconds
# 2021-07-30 16:51:48 :: 3585162 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 70.55 seconds
# numpy and bottleneck (passing resolution and not calculating in m66)
# 2021-07-30 16:57:46 :: 3643253 :: cloudbursts :: find_cloudbursts completed on 1530 metrics in 65.59 seconds
if use_bottleneck:
if len(timeseries) < 10:
if current_skyline_app == 'webapp':
return (anomalous, anomalyScore, anomalies, anomalies_dict)
if return_anomalies:
return (anomalous, anomalyScore, anomalies)
return (anomalous, anomalyScore)
x_np = np.asarray([x[1] for x in timeseries])
# Fast Min-Max scaling
data = (x_np - x_np.min()) / (x_np.max() - x_np.min())
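# Worked example of the scaling above (hypothetical values):
# [10, 20, 40] -> [(10-10)/30, (20-10)/30, (40-10)/30] = [0.0, 0.333..., 1.0]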
# m66 - calculate to nth_median
median_count = 0
while median_count < nth_median:
median_count += 1
rolling_median_s = bn.move_median(data, window=window)
median = rolling_median_s.tolist()
data = median
if median_count == nth_median:
break
# m66 - calculate the moving standard deviation for the
# nth_median array
rolling_std_s = bn.move_std(data, window=window)
std_nth_median_array = np.nan_to_num(rolling_std_s, copy=False, nan=0.0, posinf=None, neginf=None)
std_nth_median = std_nth_median_array.tolist()
if debug_logging:
current_logger.debug('debug :: %s :: std_nth_median calculated with bn' % (
algorithm_name))
else:
df = pd.DataFrame(timeseries, columns=['date', 'value'])
df['date'] = pd.to_datetime(df['date'], unit='s')
datetime_index = pd.DatetimeIndex(df['date'].values)
df = df.set_index(datetime_index)
df.drop('date', axis=1, inplace=True)
original_df = df.copy()
# MinMax scale
df = (df - df.min()) / (df.max() - df.min())
#!/usr/bin/env python3
from dataknead import Knead
from facetool import config, media, util
from facetool.constants import *
from facetool.path import Path
from facetool.profiler import Profiler
from facetool.errors import ArgumentError
from facetool.util import message, force_mkdir, sample_remove, is_json_path
from random import random
from tqdm import tqdm
import argparse
import logging
import json
import os
import pandas as pd
import pdb
import shutil
import sys
COMMANDS = (
"average",
"classify",
"cluster",
"combineaudio",
"combineframes",
"count",
"distance",
"crop",
"encode",
"extractframes",
"landmarks",
"locate",
"pose",
"probe",
"sample",
"swap",
)
OUTPUT_FORMAT_CHOICES = (
"default",
"csv",
"json"
)
SWAP_METHODS = [
"faceswap",
"faceswap3d"
]
logger = logging.getLogger(__name__)
# Note that we always profile, we just don't print the output if the
# option is not enabled
profiler = Profiler("facetool.py")
def get_parser():
parser = argparse.ArgumentParser(description = "Manipulate faces in videos and images")
# Essentials
parser.add_argument("command", choices = COMMANDS, nargs = "?")
parser.add_argument("-i", "--input", type = str,
required = True,
help = "Input file or folder, 'face' when swapping"
)
parser.add_argument("-o", "--output", type = str,
help = "Output file or folder",
default = None
)
parser.add_argument("-t", "--target", type = str,
help = "'Head' when swapping"
)
# Extra arguments
parser.add_argument("-ai", "--audio-input", type = str,
default = None,
help = "Add a separate audio file with the end result movie"
)
parser.add_argument("--as-percentage", action = "store_true",
help = "Show face distances as percentages"
)
parser.add_argument("-bl", "--blur", type = float,
default = BLUR_AMOUNT,
help = "Amount of blur to use during colour correction"
)
parser.add_argument("-dd", "--data-directory", type = str,
default = DATA_DIRECTORY,
help = "Directory where the data files are located"
)
parser.add_argument("-f", "--force", action = "store_true",
help = "Force commands and ignore warnings, like with sample"
)
parser.add_argument("-fr", "--framerate", type = str,
default = DEFAULT_FRAMERATE
)
parser.add_argument("-fa", "--feather", type = int,
default = FEATHER_AMOUNT,
help = "Softness of edges on a swapped face"
)
parser.add_argument("-if", "--ignore-nofaces", action = "store_true",
default = False,
help = "When having no faces to swap, keep the original input image"
)
parser.add_argument("-ih", "--image-height", type = int,
default = DEFAULT_IMAGE_HEIGHT,
help = "Height of output image / height"
)
parser.add_argument("-iw", "--image-width", type = int,
default = DEFAULT_IMAGE_WIDTH,
help = "Width of output image / video"
)
parser.add_argument("-kt", "--keep-temp", action = "store_true",
help = "Keep temporary files (used with video swapping)"
)
parser.add_argument("-m", "--model", type = str,
help = "Use a precalculated model (for calculating distances)"
)
parser.add_argument("--no-audio", action = "store_true")
parser.add_argument("-nocc", "--no-colour-correct", action = "store_true",
help = "Don't colour correct"
)
parser.add_argument("--no-eyesbrows", action = "store_true")
parser.add_argument("--no-nosemouth", action = "store_true")
parser.add_argument("--no-threading", action = "store_true",
help = "Don't use multithreading"
)
parser.add_argument("--only-mouth", action="store_true")
parser.add_argument("-of", "--output-format",
choices = OUTPUT_FORMAT_CHOICES,
help = "Specify output format"
)
parser.add_argument("-pp", "--predictor-path", type = str,
default = PREDICTOR_PATH
)
parser.add_argument("--profile", action = "store_true",
help = "Show profiler information"
)
parser.add_argument("-q", "--quiet", action = "store_true",
help = "Don't print output to the console"
)
parser.add_argument("-s", "--swap", action = "store_true",
help = "Swap input and target"
)
parser.add_argument("--save-originals", action = "store_true",
help = "Save original images when averaging faces"
)
parser.add_argument("--save-warped", action = "store_true",
help = "Save warped images when averaging faces"
)
parser.add_argument("--swap-method",
choices = SWAP_METHODS,
default = SWAP_METHODS[0],
help = f"Swap method for faceswap (options are: {SWAP_METHODS}"
)
parser.add_argument("-so", "--swap-order", type = str,
help = "Comma-separated list with order of faceswaps on target, implies a multiswap"
)
parser.add_argument("-sp", "--sample-percentage", type = float,
help = "Percentage of files in a directory to randomly remove (used for the sample command)"
)
parser.add_argument("-sr", "--swap-order-repeat", action = "store_true", default = False,
help = "When using --swap-order and there are not enough target faces, repeat the sequence"
)
parser.add_argument("--temp-dir", type = str,
help = "Define the directory where temporary files should be placed"
)
parser.add_argument("-v", "--verbose", action = "store_true",
help = "Show debug information"
)
parser.add_argument("-vv", "--extra-verbose", action = "store_true",
help = "Show debug information AND raise / abort on exceptions"
)
parser.add_argument("--warp-3d", action="store_true",
help = "Swap faces and morph to coordinates of target face"
)
return parser
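# Illustrative invocations of the parser defined above (all file names are
# hypothetical):
#   python facetool.py probe -i clip.mp4
#   python facetool.py swap -i face.jpg -t head.jpg -o swapped.jpg
#   python facetool.py extractframes -i clip.mp4 -o frames/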
def main(args):
if args.verbose or args.extra_verbose:
logging.basicConfig(level=logging.DEBUG)
logging.debug(args)
config.PROFILE = args.profile
config.QUIET = args.quiet
config.VERBOSE = args.verbose or args.extra_verbose
# Check for invalid argument combinations
if args.output_format in ("csv", "json") and not args.output:
raise ArgumentError("With CSV or JSON as the output format, a filename (-o) is required")
# Swap around input and target
if args.swap:
args.input, args.target = args.target, args.input
# Okay, the main stuff, get the command
# Extract all frames from a movie to a set of jpg files
if args.command == "extractframes":
util.mkdir_if_not_exists(args.output)
media.extractframes(args.input, args.output)
# Combine all frames from a set of jpg files to a movie
elif args.command == "combineframes":
media.combineframes(args.input, args.output, framerate = args.framerate)
# Combine audio with an input movie
elif args.command == "combineaudio":
media.combineaudio(args.input, args.audio_input, args.output)
# Randomly remove (sample) a percentage of files from a given directory
elif args.command == "sample":
if not args.sample_percentage:
raise ArgumentError("The sample command needs a sample percentage (-sp)")
sample_remove(args.input, args.sample_percentage, force_delete = args.force)
# Show metadata on a media file
elif args.command == "probe":
try:
data = media.probe(args.input)
except:
raise ArgumentError(f"Could not probe '{args.input}', probably not a video/image file")
else:
jsondata = json.dumps(data, indent = 4)
message(jsondata)
elif args.command == "landmarks":
from facetool.landmarks import Landmarks
landmarks = Landmarks(predictor_path = args.predictor_path)
save_data = args.output_format and args.output_format != "default"
if save_data:
data = []
# Check if we *could* have an output directory, and if so,
# create it
if args.output and Path(args.output).could_be_dir():
Path(args.output).mkdir_if_not_exists()
for pathobj in Path(args.input).images():
path = str(pathobj)
logging.debug(f"Processing {path}")
logging.debug(f"Getting landmarks of {path}")
if not args.output:
outpath = None
else:
out = Path(args.output)
if out.is_dir():
outpath = f"{out}/{Path(path).name}"
else:
outpath = str(out)
marks = landmarks.get_landmarks(str(path), outpath = outpath)
if marks and save_data:
points = [str(path)]
for m in marks:
points.extend([m.x, m.y])
data.append(points)
message(path, marks)
if save_data:
df = pd.DataFrame(data)
if args.output_format == "csv":
df.to_csv(args.output)
elif args.output_format == "json":
df.to_json(args.output)
else:
raise ArgumentError(f"Invalid output format: {args.output_format}")
elif args.command == "pose":
from facetool.poser import Poser
poser = Poser(predictor_path = args.predictor_path)
# Check if we *could* have an output directory, and if so,
# create it
if args.output and Path(args.output).could_be_dir():
Path(args.output).mkdir_if_not_exists()
for pathobj in Path(args.input).images():
path = str(pathobj)
logging.debug(f"Processing {path}")
if not args.output:
outpath = None
else:
out = Path(args.output)
if out.is_dir():
outpath = f"{out}/{Path(path).name}"
else:
outpath = str(out)
poses = poser.get_poses(path, outpath = outpath)
message(f"{path}: {poses}")
elif args.command == "count":
from facetool.detect import Detect
detect = Detect()
if args.output_format == "csv":
csv = []
for path in Path(args.input).images():
count = detect.count(path)
message(f"Number of faces in '{path}': {count}")
if args.output_format == "csv":
csv.append({
"path" : path,
"count" : count
})
if args.output_format == "csv":
df = pd.DataFrame(csv)
df.to_csv(args.output)
elif args.command == "locate":
from facetool.detect import Detect
detect = Detect()
for path in Path(args.input).images():
to_directory = os.path.isdir(args.input)
locations = detect.locate(path, args.output, to_directory = to_directory)
message(f"Face locations in '{args.input}': {locations}")
elif args.command == "crop":
from facetool.detect import Detect
from facetool.media import extractframes
# We can't crop to an image path, because an input image might
# have multiple faces, so throw an error in that case
if Path(args.output).is_image():
raise ArgumentError(f"Can't crop with an image as output")
detect = Detect()
# FIXME: we need some general mechanism for juggling frames around
TMP_DIR = "crop-tmp"
IS_VIDEO = Path(args.input).is_video()
logging.debug(f"Cropping. Input is video? {IS_VIDEO}")
if IS_VIDEO:
force_mkdir(TMP_DIR)
extractframes(args.input, TMP_DIR)
images = Path(TMP_DIR).images()
else:
images = Path(args.input).images()
for path in images:
logging.debug(f"Cropping <{path}>")
detect.crop(str(path), args.output)
if IS_VIDEO:
shutil.rmtree(TMP_DIR)
elif args.command == "classify":
from facetool.classifier import Classifier
classifier = Classifier(
data_directory = args.data_directory,
output_format = args.output_format,
predictor_path = args.predictor_path
)
for path in Path(args.input).images():
logging.debug(f"Classifying <{path}>")
classifier.classify(str(path))
if args.output_format == "csv":
classifier.to_csv(args.output)
elif args.command == "average":
from facetool.averager import Averager
profiler.tick("start averaging")
averager = Averager(
predictor_path = args.predictor_path,
img_height = args.image_height,
img_width = args.image_width,
save_originals = args.save_originals,
save_warped = args.save_warped
)
TMP_DIR = "average-tmp"
path = Path(args.input)
# If this is a video, extract all images and average those
if path.is_file() and path.is_video():
# First create a temporary directory to hold all frames
util.mkdir_if_not_exists(TMP_DIR)
media.extractframes(args.input, TMP_DIR)
# Now average
averager.average(TMP_DIR, args.output)
# And remove the temporary directory
logging.debug(f"Removing {TMP_DIR}")
== 2
self.ilasm.store(link.target.inputargs[1])
else:
# the exception value is on the stack, store it in the proper place
if isinstance(link.last_exception, flowmodel.Variable):
self.ilasm.emit(jvmgen.DUP)
self.ilasm.store(link.last_exc_value)
fld = self.db.lltype_to_cts(rclass.OBJECT).lookup_field('meta')
self.ilasm.emit(fld)
self.ilasm.store(link.last_exception)
else:
self.ilasm.store(link.last_exc_value)
self._setup_link(link)
def render_numeric_switch(self, block):
if block.exitswitch.concretetype in (ootype.SignedLongLong, ootype.UnsignedLongLong):
# TODO: it could be faster to check if the values fit in
# 32bit, and perform a cast in that case
self.render_numeric_switch_naive(block)
return
cases, min_case, max_case, default = self._collect_switch_cases(block)
is_sparse = self._is_sparse_switch(cases, min_case, max_case)
if is_sparse:
log.WARNING('TODO: use lookupswitch to render sparse numeric_switches')
self.render_numeric_switch_naive(block)
return
targets = []
for i in xrange(min_case, max_case+1):
link, lbl = cases.get(i, default)
targets.append(lbl)
self.generator.load(block.exitswitch)
self.generator.emit_tableswitch(min_case, targets, default[1])
self.render_switch_case(*default)
for link, lbl in cases.itervalues():
self.render_switch_case(link, lbl)
def render_return_block(self, block):
return_var = block.inputargs[0]
return_ty = self.db.lltype_to_cts(return_var.concretetype)
if return_var.concretetype is not ootype.Void:
self.ilasm.load(return_var)
self.ilasm.return_val(return_ty)
def render_raise_block(self, block):
exc = block.inputargs[1]
self.ilasm.load(exc)
# Check whether the static type is known to be throwable.
# If not, emit a CHECKCAST to the base exception type.
# According to Samuele, no non-Exceptions should be thrown,
# but this is not enforced by the RTyper or annotator.
jtype = self.db.lltype_to_cts(exc.concretetype)
if not jtype.throwable:
self.ilasm.downcast_jtype(self.db.exception_root_object())
self.ilasm.throw()
def _trace(self, str, writeline=False):
if writeline:
str += '\n'
jvmgen.SYSTEMERR.load(self.generator)
self.generator.load_string(str)
jvmgen.PRINTSTREAMPRINTSTR.invoke(self.generator)
def _is_printable(self, res):
if res.concretetype in (
ootype.Instance,
ootype.Signed,
ootype.Unsigned,
ootype.SignedLongLong,
ootype.UnsignedLongLong,
ootype.Bool,
ootype.Float,
ootype.Char,
ootype.UniChar,
ootype.String,
ootype.StringBuilder,
ootype.Class):
return True
if isinstance(res.concretetype, (
ootype.Instance,
ootype.Record,
ootype.List,
ootype.Dict,
ootype.DictItemsIterator)):
return True
return False
def _trace_value(self, prompt, res):
if res and self._is_printable(res):
jmethod = self.db.toString_method_for_ootype(
res.concretetype)
self._trace(" "+prompt+": ")
self.generator.emit(jvmgen.SYSTEMERR)
self.generator.load(res)
self.generator.emit(jmethod)
self.generator.emit(jvmgen.PRINTSTREAMPRINTSTR)
self._trace("\n")
def _trace_enabled(self):
return getoption('trace')
def _render_op(self, op):
self.generator.add_comment(str(op))
OOFunction._render_op(self, op)
class StaticMethodInterface(Node, JvmGeneratedClassType):
"""
We generate an abstract base class when we need function pointers,
which correspond to constants of StaticMethod ootype. We need a
different interface for each different set of argument/return
types. These abstract base classes look like:
abstract class Foo {
public abstract ReturnType invoke(Arg1, Arg2, ...);
}
Depending on the signature of Arg1, Arg2, and ReturnType, this
abstract class may have additional methods and may implement
interfaces such as PyPy.Equals or PyPy.HashCode. This is to allow
it to interface with the standalone Java code. See
the pypy.Callback interface for more information.
"""
def __init__(self, name, jargtypes, jrettype):
"""
argtypes: list of JvmTypes
rettype: JvmType
"""
JvmGeneratedClassType.__init__(self, name)
assert isinstance(jrettype, JvmType)
self.java_argument_types = [self] + list(jargtypes)
self.java_return_type = jrettype
self.dump_method = ConstantStringDumpMethod(
self, "StaticMethodInterface")
self.invoke_method_obj = jvmgen.Method.v(
self, 'invoke',
self.java_argument_types[1:], self.java_return_type)
def lookup_field(self, fieldnm):
raise KeyError(fieldnm) # no fields
def lookup_method(self, methodnm):
""" Given the method name, returns a jvmgen.Method object """
assert isinstance(self.java_return_type, JvmType)
if methodnm == 'invoke':
return self.invoke_method_obj
raise KeyError(methodnm) # only one method
def render(self, gen):
assert isinstance(self.java_return_type, JvmType)
# Scan through the jCallbackInterfaces and look for any
# that apply.
for jci in jCallbackInterfaces:
if jci.matches(self.java_argument_types[1:], self.java_return_type):
break
else:
jci = None
gen.begin_class(self, jObject, abstract=True)
if jci: gen.implements(jci)
gen.begin_constructor()
gen.end_constructor()
gen.begin_function('invoke', [], self.java_argument_types,
self.java_return_type, abstract=True)
gen.end_function()
# Because methods in the JVM are identified by both their name
# and static signature, we need to create a dummy "invoke"
# method if the Java callback interface argument types don't
# match the actual types for this method. For example, the
# equals interface has the static signature
# "(Object,Object)=>boolean", but there may be static methods
# with some signature "(X,Y)=>boolean" where X and Y are other
# types. In that case, we create an adaptor method like:
#
# boolean invoke(Object x, Object y) {
# return invoke((X)x, (Y)y);
# }
if (jci and
(jci.java_argument_types != self.java_argument_types[1:] or
jci.java_return_type != self.java_return_type)):
jci_jargs = [self] + list(jci.java_argument_types)
jci_ret = jci.java_return_type
gen.begin_function('invoke', [], jci_jargs, jci_ret)
idx = 0
for jci_arg, self_arg in zip(jci_jargs, self.java_argument_types):
gen.load_jvm_var(jci_arg, idx)
if jci_arg != self_arg:
gen.prepare_generic_result_with_jtype(self_arg)
idx += jci_arg.descriptor.type_width()
gen.emit(self.invoke_method_obj)
assert jci_ret == self.java_return_type # no variance here currently
gen.return_val(jci_ret)
gen.end_function()
gen.end_class()
class StaticMethodImplementation(Node, JvmGeneratedClassType):
"""
In addition to the StaticMethodInterface, we must generate an
implementation for each specific method that is called. These
implementation objects look like:
class Bar extends Foo {
public ReturnType invoke(Arg1, Arg2) {
return SomeStaticClass.StaticMethod(Arg1, Arg2);
}
}
If the bound_to_jty argument is not None, then this class
represents a bound method, and looks something like:
class Bar extends Foo {
Qux bound_to;
public static Bar bind(Qux to) {
Bar b = new Bar();
b.bound_to = to;
return b;
}
public ReturnType invoke(Arg1, Arg2) {
return bound_to.SomeMethod(Arg1, Arg2);
}
}
"""
def __init__(self, name, super_class, bound_to_jty, impl_method):
JvmGeneratedClassType.__init__(self, name)
self.super_class = super_class
self.impl_method = impl_method
self.dump_method = ConstantStringDumpMethod(
self, "StaticMethodImplementation")
if bound_to_jty:
self.bound_to_jty = bound_to_jty
self.bound_to_fld = jvmgen.Field(
self.name, 'bound_to', bound_to_jty, False)
self.bind_method = jvmgen.Method.s(
self, 'bind', (self.bound_to_jty,), self)
else:
self.bound_to_jty = None
self.bound_to_fld = None
self.bind_method = None
def lookup_field(self, fieldnm):
if self.bound_to_fld and fieldnm == self.bound_to_fld.name:
return self.bound_to_fld
return self.super_class.lookup_field(fieldnm)
def lookup_method(self, methodnm):
if self.bind_method and methodnm == 'bind':
return self.bind_method
return self.super_class.lookup_method(methodnm)
def render(self, gen):
gen.begin_class(self, self.super_class)
if self.bound_to_fld:
gen.add_field(self.bound_to_fld)
gen.begin_constructor()
gen.end_constructor()
# Emit the "bind" function which creates an instance if there is
# a bound field:
if self.bound_to_jty:
assert self.bound_to_fld and self.bind_method
gen.begin_function(
'bind', [], (self.bound_to_jty,), self, static=True)
gen.new_with_jtype(self)
gen.emit(jvmgen.DUP)
gen.load_jvm_var(self.bound_to_jty, 0)
self.bound_to_fld.store(gen)
gen.return_val(self)
gen.end_function()
# Emit the invoke() function, which just re-pushes the
# arguments and then invokes either the (possibly static)
# method self.impl_method. Note that if we are bound to an
# instance, we push that as the this pointer for
# self.impl_method.
gen.begin_function('invoke', [],
self.super_class.java_argument_types,
self.super_class.java_return_type)
if self.bound_to_fld:
gen.load_jvm_var(self, 0)
gen.emit(self.bound_to_fld)
for i in range(len(self.super_class.java_argument_types)):
if not i: continue # skip the this ptr
gen.load_function_argument(i)
gen.emit(self.impl_method)
gen.return_val(self.super_class.java_return_type)
gen.end_function()
gen.end_class()
class Interface(Node, JvmGeneratedInterfaceType):
"""
Represents an interface to be generated. The only class that we
currently generate into an interface is ootype.ROOT.
"""
def __init__(self, name):
JvmGeneratedInterfaceType.__init__(self, name)
self.super_class = jObject
self.rendered = False
self.properties = {}
self.methods = {}
def lookup_field(self, fieldnm):
# Right now, we don't need inheritance between interfaces.
return self.properties[fieldnm]
def lookup_method(self, methodnm):
# Right now, we don't need inheritance between interfaces.
return self.methods[methodnm]
def add_property(self, prop):
self.properties[prop.field_name] = prop
def add_method(self, method):
self.methods[method.name] = method
def render(self, gen):
self.rendered = True
gen.begin_class(self, self.super_class, interface=True)
def emit_method(method):
gen.begin_j_function(self, method, abstract=True)
gen.end_function()
for method in self.methods.values():
emit_method(method)
for prop in self.properties.values():
emit_method(prop.get_method)
emit_method(prop.put_method)
gen.end_class()
class Class(Node, JvmGeneratedClassType):
""" Represents a class to be emitted. Note that currently, classes
are emitted all in one shot, not piecemeal. """
def __init__(self, name, supercls=None):
"""
'name' should be a fully qualified Java class name like
"java.lang.String", supercls is a Class object
"""
JvmGeneratedClassType.__init__(self, name)
self.rendered = False # has rendering occurred?
self.fields = {} # maps field name to jvmgen.Field object
self.interfaces = [] # list of JvmTypes
self.methods = {} # maps method name to a Function object*
self.abstract_methods = {} # maps method name to jvmgen.Method object
self.set_super_class(supercls)
# * --- actually maps to an object that defines the
# attributes: name, method() and render(). Usually, this is a
# Function object, but in some subclasses it is not.
def simple_name(self):
dot = self.name.rfind('.')
if dot == -1: return self.name
return self.name[dot+1:]
def set_super_class(self, supercls):
self.super_class = supercls
# Throwability is inherited:
if self.super_class and self.super_class.throwable:
self.throwable = True
def add_field(self, fieldobj, fielddef):
""" Creates a new field accessed via the jvmgen.Field
descriptor 'fieldobj'. Must be called before render()."""
assert not self.rendered and isinstance(fieldobj, jvmgen.Field)
self.fields[fieldobj.field_name] = (fieldobj, fielddef)
def add_interface(self, inter):
assert not self.rendered and isinstance(inter, JvmType)
self.interfaces.append(inter)
def lookup_field(self, fieldnm):
if fieldnm in self.fields:
return self.fields[fieldnm][0]
return self.super_class.lookup_field(fieldnm)
def lookup_method(self, methodnm):
""" Given the method name, returns a jvmgen.Method object """
if methodnm in self.methods:
return self.methods[methodnm].method()
if methodnm in self.abstract_methods:
return self.abstract_methods[methodnm]
return self.super_class.lookup_method(methodnm)
def add_method(self, func):
""" Creates a new method in this class, represented by the
Function object 'func'. Must be called before render();
intended to be invoked by
to use the admin shortcut view.
"""
model_ctype = ContentType.objects.get_for_model(ModelWithStringPrimaryKey)
obj = ModelWithStringPrimaryKey.objects.create(string_pk='foo')
shortcut_url = reverse('admin:view_on_site', args=(model_ctype.pk, obj.pk))
# Not logged in: we should see the login page.
response = self.client.get(shortcut_url, follow=True)
self.assertTemplateUsed(response, 'admin/login.html')
# Logged in? Redirect.
self.client.force_login(self.superuser)
response = self.client.get(shortcut_url, follow=False)
# Can't use self.assertRedirects() because User.get_absolute_url() is silly.
self.assertEqual(response.status_code, 302)
# Domain may depend on contrib.sites tests also run
self.assertRegex(response.url, 'http://(testserver|example.com)/dummy/foo/')
def test_has_module_permission(self):
"""
has_module_permission() returns True for all users who
have any permission for that module (add, change, or delete), so that
the module is displayed on the admin index page.
"""
self.client.force_login(self.superuser)
response = self.client.get(self.index_url)
self.assertContains(response, 'admin_views')
self.assertContains(response, 'Articles')
self.client.logout()
self.client.force_login(self.viewuser)
response = self.client.get(self.index_url)
self.assertContains(response, 'admin_views')
self.assertContains(response, 'Articles')
self.client.logout()
self.client.force_login(self.adduser)
response = self.client.get(self.index_url)
self.assertContains(response, 'admin_views')
self.assertContains(response, 'Articles')
self.client.logout()
self.client.force_login(self.changeuser)
response = self.client.get(self.index_url)
self.assertContains(response, 'admin_views')
self.assertContains(response, 'Articles')
self.client.logout()
self.client.force_login(self.deleteuser)
response = self.client.get(self.index_url)
self.assertContains(response, 'admin_views')
self.assertContains(response, 'Articles')
def test_overriding_has_module_permission(self):
"""
If has_module_permission() always returns False, the module shouldn't
be displayed on the admin index page for any users.
"""
articles = Article._meta.verbose_name_plural.title()
sections = Section._meta.verbose_name_plural.title()
index_url = reverse('admin7:index')
self.client.force_login(self.superuser)
response = self.client.get(index_url)
self.assertContains(response, sections)
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.viewuser)
response = self.client.get(index_url)
self.assertNotContains(response, 'admin_views')
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.adduser)
response = self.client.get(index_url)
self.assertNotContains(response, 'admin_views')
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.changeuser)
response = self.client.get(index_url)
self.assertNotContains(response, 'admin_views')
self.assertNotContains(response, articles)
self.client.logout()
self.client.force_login(self.deleteuser)
response = self.client.get(index_url)
self.assertNotContains(response, articles)
# The app list displays Sections but not Articles as the latter has
# ModelAdmin.has_module_permission() = False.
self.client.force_login(self.superuser)
response = self.client.get(reverse('admin7:app_list', args=('admin_views',)))
self.assertContains(response, sections)
self.assertNotContains(response, articles)
def test_post_save_message_no_forbidden_links_visible(self):
"""
Post-save message shouldn't contain a link to the change form if the
user doesn't have the change permission.
"""
self.client.force_login(self.adduser)
# Emulate Article creation for user with add-only permission.
post_data = {
"title": "Fun & games",
"content": "Some content",
"date_0": "2015-10-31",
"date_1": "16:35:00",
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_article_add'), post_data, follow=True)
self.assertContains(
response,
'<li class="success">The article “Fun & games” was added successfully.</li>',
html=True
)
@override_settings(
ROOT_URLCONF='admin_views.urls',
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}],
)
class AdminViewProxyModelPermissionsTests(TestCase):
"""Tests for proxy models permissions in the admin."""
@classmethod
def setUpTestData(cls):
cls.viewuser = User.objects.create_user(username='viewuser', password='<PASSWORD>', is_staff=True)
cls.adduser = User.objects.create_user(username='adduser', password='<PASSWORD>', is_staff=True)
cls.changeuser = User.objects.create_user(username='changeuser', password='<PASSWORD>', is_staff=True)
cls.deleteuser = User.objects.create_user(username='deleteuser', password='<PASSWORD>', is_staff=True)
# Setup permissions.
opts = UserProxy._meta
cls.viewuser.user_permissions.add(get_perm(UserProxy, get_permission_codename('view', opts)))
cls.adduser.user_permissions.add(get_perm(UserProxy, get_permission_codename('add', opts)))
cls.changeuser.user_permissions.add(get_perm(UserProxy, get_permission_codename('change', opts)))
cls.deleteuser.user_permissions.add(get_perm(UserProxy, get_permission_codename('delete', opts)))
# UserProxy instances.
cls.user_proxy = UserProxy.objects.create(username='user_proxy', password='<PASSWORD>')
def test_add(self):
self.client.force_login(self.adduser)
url = reverse('admin:admin_views_userproxy_add')
data = {
'username': 'can_add',
'password': '<PASSWORD>',
'date_joined_0': '2019-01-15',
'date_joined_1': '16:59:10',
}
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(UserProxy.objects.filter(username='can_add').exists())
def test_view(self):
self.client.force_login(self.viewuser)
response = self.client.get(reverse('admin:admin_views_userproxy_changelist'))
self.assertContains(response, '<h1>Select user proxy to view</h1>')
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('admin:admin_views_userproxy_change', args=(self.user_proxy.pk,)))
self.assertContains(response, '<h1>View user proxy</h1>')
self.assertContains(response, '<div class="readonly">user_proxy</div>')
def test_change(self):
self.client.force_login(self.changeuser)
data = {
'password': '<PASSWORD>',
'username': self.user_proxy.username,
'date_joined_0': self.user_proxy.date_joined.strftime('%Y-%m-%d'),
'date_joined_1': self.user_proxy.date_joined.strftime('%H:%M:%S'),
'first_name': 'first_name',
}
url = reverse('admin:admin_views_userproxy_change', args=(self.user_proxy.pk,))
response = self.client.post(url, data)
self.assertRedirects(response, reverse('admin:admin_views_userproxy_changelist'))
self.assertEqual(UserProxy.objects.get(pk=self.user_proxy.pk).first_name, 'first_name')
def test_delete(self):
self.client.force_login(self.deleteuser)
url = reverse('admin:admin_views_userproxy_delete', args=(self.user_proxy.pk,))
response = self.client.post(url, {'post': 'yes'}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(UserProxy.objects.filter(pk=self.user_proxy.pk).exists())
@override_settings(ROOT_URLCONF='admin_views.urls')
class AdminViewsNoUrlTest(TestCase):
"""Regression test for #17333"""
@classmethod
def setUpTestData(cls):
# User who can change Reports
cls.changeuser = User.objects.create_user(username='changeuser', password='<PASSWORD>', is_staff=True)
cls.changeuser.user_permissions.add(get_perm(Report, get_permission_codename('change', Report._meta)))
def test_no_standard_modeladmin_urls(self):
"""Admin index views don't break when user's ModelAdmin removes standard urls"""
self.client.force_login(self.changeuser)
r = self.client.get(reverse('admin:index'))
# we shouldn't get a 500 error caused by a NoReverseMatch
self.assertEqual(r.status_code, 200)
self.client.get(reverse('admin:logout'))
@skipUnlessDBFeature('can_defer_constraint_checks')
@override_settings(ROOT_URLCONF='admin_views.urls')
class AdminViewDeletedObjectsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')
cls.deleteuser = User.objects.create_user(username='deleteuser', password='<PASSWORD>', is_staff=True)
cls.s1 = Section.objects.create(name='Test section')
cls.a1 = Article.objects.create(
content='<p>Middle content</p>', date=datetime.datetime(2008, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a2 = Article.objects.create(
content='<p>Oldest content</p>', date=datetime.datetime(2000, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a3 = Article.objects.create(
content='<p>Newest content</p>', date=datetime.datetime(2009, 3, 18, 11, 54, 58), section=cls.s1
)
cls.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
cls.v1 = Villain.objects.create(name='Adam')
cls.v2 = Villain.objects.create(name='Sue')
cls.sv1 = SuperVillain.objects.create(name='Bob')
cls.pl1 = Plot.objects.create(name='World Domination', team_leader=cls.v1, contact=cls.v2)
cls.pl2 = Plot.objects.create(name='World Peace', team_leader=cls.v2, contact=cls.v2)
cls.pl3 = Plot.objects.create(name='<NAME>', team_leader=cls.v1, contact=cls.v1)
cls.pd1 = PlotDetails.objects.create(details='almost finished', plot=cls.pl1)
cls.sh1 = SecretHideout.objects.create(location='underground bunker', villain=cls.v1)
cls.sh2 = SecretHideout.objects.create(location='floating castle', villain=cls.sv1)
cls.ssh1 = SuperSecretHideout.objects.create(location='super floating castle!', supervillain=cls.sv1)
cls.cy1 = CyclicOne.objects.create(name='I am recursive', two_id=1)
cls.cy2 = CyclicTwo.objects.create(name='I am recursive too', one_id=1)
def setUp(self):
self.client.force_login(self.superuser)
def test_nesting(self):
"""
Objects should be nested to display the relationships that
cause them to be scheduled for deletion.
"""
pattern = re.compile(
r'<li>Plot: <a href="%s">World Domination</a>\s*<ul>\s*'
r'<li>Plot details: <a href="%s">almost finished</a>' % (
reverse('admin:admin_views_plot_change', args=(self.pl1.pk,)),
reverse('admin:admin_views_plotdetails_change', args=(self.pd1.pk,)),
)
)
response = self.client.get(reverse('admin:admin_views_villain_delete', args=(self.v1.pk,)))
self.assertRegex(response.content.decode(), pattern)
def test_cyclic(self):
"""
Cyclic relationships should still cause each object to only be
listed once.
"""
one = '<li>Cyclic one: <a href="%s">I am recursive</a>' % (
reverse('admin:admin_views_cyclicone_change', args=(self.cy1.pk,)),
)
two = '<li>Cyclic two: <a href="%s">I am recursive too</a>' % (
reverse('admin:admin_views_cyclictwo_change', args=(self.cy2.pk,)),
)
response = self.client.get(reverse('admin:admin_views_cyclicone_delete', args=(self.cy1.pk,)))
self.assertContains(response, one, 1)
self.assertContains(response, two, 1)
def test_perms_needed(self):
self.client.logout()
delete_user = User.objects.get(username='deleteuser')
delete_user.user_permissions.add(get_perm(Plot, get_permission_codename('delete', Plot._meta)))
self.client.force_login(self.deleteuser)
response = self.client.get(reverse('admin:admin_views_plot_delete', args=(self.pl1.pk,)))
self.assertContains(response, "your account doesn't have permission to delete the following types of objects")
self.assertContains(response, "<li>plot details</li>")
def test_protected(self):
q = Question.objects.create(question="Why?")
a1 = Answer.objects.create(question=q, answer="Because.")
a2 = Answer.objects.create(question=q, answer="Yes.")
response = self.client.get(reverse('admin:admin_views_question_delete', args=(q.pk,)))
self.assertContains(response, "would require deleting the following protected related objects")
self.assertContains(
response,
'<li>Answer: <a href="%s">Because.</a></li>' % reverse('admin:admin_views_answer_change', args=(a1.pk,))
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Yes.</a></li>' % reverse('admin:admin_views_answer_change', args=(a2.pk,))
)
def test_post_delete_protected(self):
"""
A POST request to delete protected objects should display the page
which says the deletion is prohibited.
"""
q = Question.objects.create(question='Why?')
Answer.objects.create(question=q, answer='Because.')
response = self.client.post(reverse('admin:admin_views_question_delete', args=(q.pk,)), {'post': 'yes'})
self.assertEqual(Question.objects.count(), 1)
self.assertContains(response, "would require deleting the following protected related objects")
def test_restricted(self):
album = Album.objects.create(title='Amaryllis')
song = Song.objects.create(album=album, name='Unity')
response = self.client.get(reverse('admin:admin_views_album_delete', args=(album.pk,)))
self.assertContains(
response,
'would require deleting the following protected related objects',
)
self.assertContains(
response,
'<li>Song: <a href="%s">Unity</a></li>'
% reverse('admin:admin_views_song_change', args=(song.pk,))
)
def test_post_delete_restricted(self):
album = Album.objects.create(title='Amaryllis')
Song.objects.create(album=album, name='Unity')
response = self.client.post(
reverse('admin:admin_views_album_delete', args=(album.pk,)),
{'post': 'yes'},
)
self.assertEqual(Album.objects.count(), 1)
self.assertContains(
response,
'would require deleting the following protected related objects',
)
def test_not_registered(self):
should_contain = """<li>Secret hideout: underground bunker"""
response = self.client.get(reverse('admin:admin_views_villain_delete', args=(self.v1.pk,)))
self.assertContains(response, should_contain, 1)
def test_multiple_fkeys_to_same_model(self):
"""
If a deleted object has two relationships from another model,
both of those should be followed in looking for related
objects to delete.
"""
should_contain = '<li>Plot: <a href="%s">World Domination</a>' % reverse(
'admin:admin_views_plot_change', args=(self.pl1.pk,)
)
response = self.client.get(reverse('admin:admin_views_villain_delete', args=(self.v1.pk,)))
self.assertContains(response, should_contain)
response = self.client.get(reverse('admin:admin_views_villain_delete', args=(self.v2.pk,)))
self.assertContains(response, should_contain)
def test_multiple_fkeys_to_same_instance(self):
"""
If a deleted object has two relationships pointing to it from
another object, the other object should still only be listed
once.
"""
should_contain = '<li>Plot: <a href="%s">World Peace</a></li>' % reverse(
'admin:admin_views_plot_change', args=(self.pl2.pk,)
)
response = self.client.get(reverse('admin:admin_views_villain_delete', args=(self.v2.pk,)))
self.assertContains(response, should_contain, 1)
def test_inheritance(self):
"""
In the case of an inherited model, if either the child or
parent-model instance is deleted, both instances are listed
for deletion, as well as any relationships they have.
"""
should_contain = [
'<li>Villain: <a href="%s">Bob</a>' % reverse('admin:admin_views_villain_change', args=(self.sv1.pk,)),
'<li>Super villain: <a href="%s">Bob</a>' % reverse(
'admin:admin_views_supervillain_change', args=(self.sv1.pk,)
),
'<li>Secret hideout: floating castle',
'<li>Super secret hideout: super floating castle!',
]
response = self.client.get(reverse('admin:admin_views_villain_delete', args=(self.sv1.pk,)))
for should in should_contain:
self.assertContains(response, should, 1)
response = self.client.get(reverse('admin:admin_views_supervillain_delete', args=(self.sv1.pk,)))
for should in should_contain:
self.assertContains(response, should, 1)
def test_generic_relations(self):
"""
If a deleted object has GenericForeignKeys pointing to it,
those objects should be listed for deletion.
"""
plot = self.pl3
tag = FunkyTag.objects.create(content_object=plot, name='hott')
should_contain = '<li>Funky tag: <a href="%s">hott' % reverse(
'admin:admin_views_funkytag_change', args=(tag.id,))
response = self.client.get(reverse('admin:admin_views_plot_delete', args=(plot.pk,)))
self.assertContains(response, should_contain)
def test_generic_relations_with_related_query_name(self):
"""
If a deleted object has GenericForeignKey with
GenericRelation(related_query_name='...') pointing to it, those objects
should be listed for deletion.
"""
bookmark = Bookmark.objects.create(name='djangoproject')
tag = FunkyTag.objects.create(content_object=bookmark, name='django')
tag_url = reverse('admin:admin_views_funkytag_change', args=(tag.id,))
should_contain = '<li>Funky tag: <a href="%s">django' % tag_url
response = self.client.get(reverse('admin:admin_views_bookmark_delete', args=(bookmark.pk,)))
self.assertContains(response, should_contain)
def test_delete_view_uses_get_deleted_objects(self):
"""The delete view uses ModelAdmin.get_deleted_objects()."""
book = Book.objects.create(name='Test Book')
response = self.client.get(reverse('admin2:admin_views_book_delete', args=(book.pk,)))
# BookAdmin.get_deleted_objects() returns custom text.
self.assertContains(response, 'a deletable object')
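# --- Illustrative sketch (not part of the original test module) ---
# test_delete_view_uses_get_deleted_objects() above relies on a ModelAdmin
# registered on the 'admin2' site that overrides get_deleted_objects(). A
# hypothetical override could look like the following; the real BookAdmin
# lives in the test app's admin module and may differ.
#
# class BookAdmin(admin.ModelAdmin):
#     def get_deleted_objects(self, objs, request):
#         # (deletable objects, model counts, missing perms, protected objects)
#         return ['a deletable object'], {'books': 1}, set(), []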
@override_settings(ROOT_URLCONF='admin_views.urls')
class TestGenericRelations(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')
cls.v1 = Villain.objects.create(name='Adam')
cls.pl3 = Plot.objects.create(name='Corn Conspiracy', team_leader=cls.v1, contact=cls.v1)
def setUp(self):
self.client.force_login(self.superuser)
def test_generic_content_object_in_list_display(self):
FunkyTag.objects.create(content_object=self.pl3, name='hott')
response = self.client.get(reverse('admin:admin_views_funkytag_changelist'))
self.assertContains(response, "%s</td>" % self.pl3)
@override_settings(ROOT_URLCONF='admin_views.urls')
class AdminViewStringPrimaryKeyTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import re
import sys
from collections import defaultdict
import torch
import torch.optim as optim
from torch.optim import optimizer, lbfgs, adagrad, adadelta, rmsprop
from trident.backend.common import get_class, snake2camel
from trident.backend.pytorch_ops import *
__all__ = ['Adam', 'SGD', 'LBFGS', 'Adadelta', 'Adagrad', 'RMSprop', 'RAdam', 'PlainRAdam', 'AdamW', 'Lookahead',
'Ranger', 'RangerLars', 'AdaBelief', 'RangerAdaBelief', 'DiffGrad', 'Lamb', 'get_optimizer']
class Optimizer(optimizer.Optimizer):
"""Base class for all optimizers.
.. warning::
Parameters need to be specified as collections that have a deterministic
ordering that is consistent between runs. Examples of objects that don't
satisfy those properties are sets and iterators over values of dictionaries.
Args:
params (iterable): an iterable of :class:`torch.Tensor` s or
:class:`dict` s. Specifies what Tensors should be optimized.
defaults: (dict): a dict containing default values of optimization
options (used when a parameter group doesn't specify them).
"""
def __init__(self, params, defaults):
super().__init__(params, defaults)
self._base_lr = 1e-3
self.gradient_centralization = None
def adjust_learning_rate(self, new_lr, verbose=True):
"""
Args:
new_lr (float): new learning rate value
verbose (bool): if True, will print the learning rate change information.
"""
old_lr = self.param_groups[0]['lr']
if old_lr != new_lr:
self.param_groups[0]['lr'] = new_lr
if verbose:
print('learning rate changed! (from {0:.3e} to {1:.3e})'.format(old_lr, new_lr))
@property
def parameters(self):
"""
Returns: the weights that need to be trained
"""
return [self.param_groups[i]['params'] for i in range(len(self.param_groups))]
@parameters.setter
def parameters(self,value):
"""
Sets the weights that need to be trained.
"""
if isinstance(value, torch.Tensor):
raise TypeError("params argument given to the optimizer should be "
"an iterable of Tensors or dicts, but got " +
torch.typename(value))
if not hasattr(self,'param_groups') or self.param_groups is None or len(self.param_groups)==0:
self.param_groups=[]
param_groups = list(value)
if len(param_groups) == 0:
raise ValueError("optimizer got an empty parameter list")
if not isinstance(param_groups[0], dict):
param_groups = [{'params': param_groups}]
for param_group in param_groups:
self.add_param_group(param_group)
else:
self.param_groups[0]['params']=value
@property
def lr(self):
"""str: The getter method of the 'learning rate' property."""
return self.param_groups[0]['lr']
@lr.setter
def lr(self, value: float):
if self.lr != value:
old_lr = self.lr
new_lr = value
self.param_groups[0]['lr'] = new_lr
print('learning rate changed! (from {0:.3e} to {1:.3e})'.format(old_lr, new_lr))
@property
def base_lr(self):
"""str: The getter method of the 'base learning rate' property (mean the starting learning rate ,
excluding warmup )."""
return self._base_lr
@base_lr.setter
def base_lr(self, value):
self._base_lr = value
class Adam(Optimizer):
"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
References
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.95, 0.999), eps=1e-7, weight_decay=0, amsgrad=False,
gradient_centralization=None, **kwargs):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
self.gradient_centralization = gradient_centralization
def __setstate__(self, state):
super(Adam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
for p in group['params']:
if p.grad is None or not p.requires_grad:
continue
half_precision = False
if p.data.dtype == torch.float16:
half_precision = True
p.data = p.data.float()
p.grad = p.grad.float()
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
if any_abnormal_number(grad):
grad = where(is_abnormal_number(grad), zeros_like(grad), grad)
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
if group['weight_decay'] != 0:
grad = grad.add(p, alpha=group['weight_decay'])
if self.gradient_centralization in ['all', 'gcc']:
if len(list(grad.size())) > 3:
grad.add_(-grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True))
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
step_size = group['lr'] / bias_correction1
G_grad = exp_avg / denom
if self.gradient_centralization in ['all', 'gc']:
if len(list(G_grad.size())) > 1:
G_grad.add_(-G_grad.mean(dim=tuple(range(1, len(list(G_grad.size())))), keepdim=True))
p.data.add_(G_grad, alpha=-step_size)
if half_precision:
p.data = p.data.half()
p.grad = p.grad.half()
return loss
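# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of driving the Adam optimizer defined above, including the
# optional gradient-centralization flag. The toy model and data are
# hypothetical placeholders; in practice this loop lives in your training code.
def _example_adam_usage():
    model = torch.nn.Linear(4, 2)
    opt = Adam(model.parameters(), lr=1e-3, gradient_centralization='gc')
    x, y = torch.randn(8, 4), torch.randn(8, 2)
    for _ in range(3):
        opt.zero_grad()
        loss = torch.nn.functional.mse_loss(model(x), y)
        loss.backward()
        opt.step()
    return loss.item()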
class SGD(optim.SGD):
r"""Implements stochastic gradient descent (optionally with momentum).
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
Examples:
>>> SGD(model.parameters(), lr=0.1, momentum=0.9)
__ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
.. note::
The implementation of SGD with Momentum/Nesterov subtly differs from
Sutskever et. al. and implementations in some other frameworks.
Considering the specific case of Momentum, the update can be written as
.. math::
\begin{aligned}
v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
\end{aligned}
where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the
parameters, gradient, velocity, and momentum respectively.
This is in contrast to Sutskever et. al. and
other frameworks which employ an update of the form
.. math::
\begin{aligned}
v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\
p_{t+1} & = p_{t} - v_{t+1}.
\end{aligned}
The Nesterov version is analogously modified.
"""
def __init__(self, params, lr=1e-3, momentum=0, dampening=0,
weight_decay=0, nesterov=False,**kwargs):
super().__init__(params, lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov)
def adjust_learning_rate(self, new_lr, verbose=True):
"""
Args:
new_lr (float): new learning rate value
verbose (bool): if True, will print the learning rate change information.
"""
old_lr = self.param_groups[0]['lr']
if old_lr != new_lr:
self.param_groups[0]['lr'] = new_lr
if verbose:
print('learning rate changed! (from {0:.3e} to {1:.3e})'.format(old_lr, new_lr))
@property
def parameters(self):
"""
Returns: the weights that need to be trained
"""
return self.param_groups[0]['params']
@property
def lr(self):
"""str: The getter method of the 'learning rate' property."""
return self.param_groups[0]['lr']
@lr.setter
def lr(self, value: float):
if self.lr != value:
old_lr = self.lr
new_lr = value
self.param_groups[0]['lr'] = new_lr
print('learning rate changed! (from {0:.3e} to {1:.3e})'.format(old_lr, new_lr))
@property
def base_lr(self):
"""str: The getter method of the 'base learning rate' property (mean the starting learning rate ,
excluding warmup )."""
return self._base_lr
@base_lr.setter
def base_lr(self, value):
self._base_lr = value
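# --- Worked example of the two momentum conventions (illustrative only) ---
# The SGD docstring above contrasts the PyTorch update (lr applied to the
# velocity when updating p) with the Sutskever et al. form (lr folded into the
# velocity itself). With a constant gradient the two produce identical
# parameter trajectories; only the velocity buffers differ by a factor of lr.
def _momentum_conventions_example(mu=0.9, lr=0.1, g=1.0, steps=3):
    v_torch = p_torch = 0.0
    v_sutskever = p_sutskever = 0.0
    for _ in range(steps):
        v_torch = mu * v_torch + g               # velocity without lr
        p_torch -= lr * v_torch                  # lr applied at the update
        v_sutskever = mu * v_sutskever + lr * g  # lr folded into the velocity
        p_sutskever -= v_sutskever
    return p_torch, p_sutskever                  # both equal -0.561 here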
class LBFGS(lbfgs.LBFGS):
"""Implements L-BFGS algorithm, heavily inspired by `minFunc
<https://www.cs.ubc.ca/~schmidtm/Software/minFunc.html>`.
.. warning::
This optimizer doesn't support per-parameter options and parameter
groups (there can be only one).
.. warning::
Right now all parameters have to be on a single device. This will be
improved in the future.
.. note::
This is a very memory intensive optimizer (it requires additional
``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory
try reducing the history size, or use a different algorithm.
Arguments:
lr (float): learning rate (default: 1)
Suess+19, but this
can be tuned depending on the use. Each annulus is 1 PSF FWHM wide.'''
# make sure the catalog photometry is good for this object
if self.flag != 1:
print('Could not calculate photometry for '+str(self.id)+', flag = '+str(self.flag))
return -99
# make cutouts of the data, weight map, and segmentation map around this galaxy
pixY, pixX = self.get_pix(wcs)
dat = cutout(data, pixX, pixY, pscale = self.pscale)
whtDat = cutout(weight, pixX, pixY, pscale = self.pscale)
segDat = cutout(seg, pixX, pixY, pscale = self.pscale)
# explicitly mask out anything that SExtractor identified as belonging to another galaxy.
# note that this doesn't mask out anything SExtractor identifies as sky (good,
# b/c it's usually too conservative in its flux threshold for our purposes....)
mask = (segDat!=self.id) & (segDat!=0)
# make a long list of potential apertures; will trim later
apList = self.make_apertureList(wcs, paIm)
# initialize lists to store aperture photometry and its error
self.photometry[filterName] = []
self.photometry[filterName+'_err'] = []
# treat the first annulus separately: makes our stopping criterion below easier
ann=0
area = apList[ann].area() # built in photutils area-calculating function
# do aperture photometry on the mask to calculate the total non-masked area...
maskArea = photutils.aperture_photometry(mask, apList[ann])['aperture_sum'][0]
# ... and use it to make a correction factor to scale up the flux
corr = area / (area - maskArea)
flux = photutils.aperture_photometry(dat, apList[ann], mask=mask)['aperture_sum'][0] \
* corr * photflam # calculate flux in physical units
error = emptyApError(filterName, area, whtDat, self.pscale, self.survey) * photflam
# add flux and error to the list of measured aperture photometry
self.photometry[filterName].append(flux)
self.photometry[filterName+'_err'].append(error)
# following annuli: quit when S/N reaches the threshold
while (self.photometry[filterName][-1] / self.photometry[filterName+'_err'][-1]) > SNthresh:
ann = ann+1 # increment counter
# area of an annulus is the area of this ellipse minus the ellipse inside it
area = apList[ann].area() - apList[ann-1].area()
# same for the mask
maskArea = (photutils.aperture_photometry(mask, apList[ann])['aperture_sum'][0] -
photutils.aperture_photometry(mask, apList[ann-1])['aperture_sum'][0])
corr = area / (area - maskArea)
# and for the flux
flux = (photutils.aperture_photometry(dat, apList[ann], mask=mask)['aperture_sum'][0] -
photutils.aperture_photometry(dat, apList[ann-1], mask=mask)['aperture_sum'][0]) \
* corr * photflam
# empty ap area only depends on the total area of the elliptical aperture
error = emptyApError(filterName, area, whtDat, self.pscale, self.survey) * photflam
# make sure that our measured flux is finite, and append it to the list
if np.isfinite(flux):
self.photometry[filterName].append(flux)
self.photometry[filterName+'_err'].append(error)
else:
self.photometry[filterName].append(np.nan)
self.photometry[filterName+'_err'].append(np.nan)
# trim extra annuli / edges that had S/N below the threshold
# but first, make sure that we actually have annuli to work with...
if ann==0:
return(-99)
self.nAnnuli = ann # update total number of annuli with the max we got to
self.edges = self.edges[:ann] # trim unnecessary edges
self.pscale = max(50, int(self.edges[-1]+20)) # and update the cutout scale if necessary
# trim the last photometric point (the one that failed S/N check)
self.photometry[filterName] = self.photometry[filterName][:-1]
self.photometry[filterName+'_err'] = self.photometry[filterName+'_err'][:-1]
def calcPhotometry(self, filterName, photflam, data, wcs, weight, seg, paIm):
'''Calculates aperture photometry for all annuli in one image.
Data, weight, and seg should be the full hdu[x].data arrays '''
# make sure the catalog photometry is good for this object
if self.flag != 1:
print('Could not calculate photometry for '+str(self.id)+', flag = '+str(self.flag))
return np.nan
# make cutouts of the data, weight map, and segmentation map around this galaxy
pixY, pixX = self.get_pix(wcs)
dat = cutout(data, pixX, pixY, pscale = self.pscale)
whtDat = cutout(weight, pixX, pixY, pscale = self.pscale)
segDat = cutout(seg, pixX, pixY, pscale = self.pscale)
mask = (segDat!=self.id) & (segDat!=0) # mask out other galaxies
# make list of apertures
apList = self.make_apertureList(wcs, paIm)
# don't measure photometry if the wht array is zero (ie, no exposures here)
if not np.sum(whtDat):
self.photometry[filterName] = [np.nan for i in range(self.nAnnuli)]
self.photometry[filterName+'_err'] = [np.nan for i in range(self.nAnnuli)]
return 0
# initialize lists to store aperture photometry and its error
self.photometry[filterName] = []
self.photometry[filterName+'_err'] = []
# and go actually measure aperture photometry in each annulus
for ann in range(self.nAnnuli):
# first annulus is just an ellipse (not an annulus)
if ann == 0:
area = apList[ann].area()
maskArea = photutils.aperture_photometry(mask, apList[ann])['aperture_sum'][0]
corr = area / (area - maskArea)
flux = photutils.aperture_photometry(dat, apList[ann], mask=mask)['aperture_sum'][0] \
* corr * photflam
# otherwise, subtract off flux from the ellipse before this one to make an elliptical annulus
else:
area = apList[ann].area() - apList[ann-1].area()
maskArea = (photutils.aperture_photometry(mask, apList[ann])['aperture_sum'][0] -
photutils.aperture_photometry(mask, apList[ann-1])['aperture_sum'][0])
corr = area / (area - maskArea)
flux = (photutils.aperture_photometry(dat, apList[ann], mask=mask)['aperture_sum'][0] -
photutils.aperture_photometry(dat, apList[ann-1], mask=mask)['aperture_sum'][0]) \
* corr * photflam
error = emptyApError(filterName, area, whtDat, self.pscale, self.survey) * photflam
if np.isfinite(flux):
self.photometry[filterName].append(flux)
self.photometry[filterName+'_err'].append(error)
else:
self.photometry[filterName].append(np.nan)
self.photometry[filterName+'_err'].append(np.nan)
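# --- Illustrative sketch of the annulus-photometry pattern used above ---
# (not part of the original class). Flux in annulus i is measured as the
# difference between two elliptical apertures, corrected for masked pixels by
# corr = area / (area - masked_area). Assumes an older photutils where
# EllipticalAperture.area() is a method; in recent releases it is a property.
def _annulus_flux_sketch(data, mask, ap_inner, ap_outer, photflam=1.0):
    import photutils
    area = ap_outer.area() - ap_inner.area()
    masked = (photutils.aperture_photometry(mask, ap_outer)['aperture_sum'][0] -
              photutils.aperture_photometry(mask, ap_inner)['aperture_sum'][0])
    corr = area / (area - masked)
    flux = (photutils.aperture_photometry(data, ap_outer, mask=mask)['aperture_sum'][0] -
            photutils.aperture_photometry(data, ap_inner, mask=mask)['aperture_sum'][0])
    return flux * corr * photflam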
def write_FAST(self, filters, translate, images, num):
'''This makes a list of strings nAnnuli long; each string is one
full line for a FAST input file. 'filters' is the order that we should
write the filters in. This is provided because FAST isn't smart enough
to re-build the libraries if you have the same filters but they're
in a different order. '''
# initialize the list of lines we'll want to write out
fastList = []
# make a string for each annulus
for ann in range(self.nAnnuli):
# start with the galaxy ID and redshift
galStr = str(num) + str(ann) + '\t' + str(self.z) + '\t'
# now, for each filter we need to write out both the flux and the error
# in that filter
for fil in filters:
# if it's a filter where we've done resolved photometry...
if fil in images:
# make sure we've done calculation
if fil in self.photometry.keys():
# if it's not nan, write it out
if np.isfinite(self.photometry[fil][ann]):
fnu = Fnu25(self.photometry[fil][ann], translate[fil][0])
fnu_err = Fnu25(self.photometry[fil+'_err'][ann], translate[fil][0])
galStr += str(fnu) + '\t' + str(fnu_err) + '\t'
# if it's nan, write -99s
else:
galStr+= '-99\t-99\t'
else:
galStr+='-99\t-99\t'
# for filters without resolved photometry, just write out empties
# we do this so that FAST will predict the values in this filter,
# which we'll use later to calculate the integral constraint
# described in Wuyts+12
else:
galStr+= '-99\t-99\t'
fastList.append(galStr)
return fastList
def calc_corr(self, images):
'''calculate 'correction' that brings total flux down by factor of
(mean of) diff b/t measured and catalog flux. This is really just
an aperture correction that accounts for how far out we were able
to measure aperture photometry.'''
# initialize lists for the measured fluxes
totMeas = []; totErr = []; divErr = []; catfl = []
# for every filter where we have resolved photometry, find the
# total flux we measured in that filter
for im in images:
# measured flux & error
totMeas.append(np.sum(self.photometry[im])) # total flux
totErr.append(np.sqrt(np.sum(np.array(self.photometry[im+'_err'])**2.))) # flux error
# calculate the percentage error (used to weight each filter)
divErr.append(totMeas[-1]/self.catalogFlux['f_'+im.lower()] * \
np.sqrt((totErr[-1]/totMeas[-1])**2. +
(self.catalogError['f_'+im.lower()] /
self.catalogFlux['f_'+im.lower()])**2.))
# record the catalog flux in that filter
catfl.append(self.catalogFlux['f_'+im.lower()])
# do weighted average for correction (making sure to leave out NaNs)
self.corr = np.average(np.nan_to_num(np.array(totMeas)/np.array(catfl)),
weights=np.nan_to_num(1/np.array(divErr)**2.))
def get_Lg(self, images, translate):
'''Get the flux in the *rest-frame* SDSS g band filter as a function
of radius. We want to use EAZY to interpolate our measured ~5-8 bands
to the rest-frame g band. This is to report M/L_g for comparison
to literature values.'''
# write out a catalog file for EAZY
with open('EAZY/Lg.cat', 'w') as f:
# write header
f.write('# id\tz_spec ')
# write out the 'translate' value for each measured resolved
# filter in header so EAZY knows where to find filter curves
s = ''
for im in images:
s+=translate[im][1] + '\t'
s+='E'+translate[im][1][1:] + '\t'
f.write(s+'\n')
# write each annulus as a separate "galaxy"
for i in range(self.nAnnuli):
# id and redshift
f.write(str(i) + '\t' + str(self.z) + '\t')
# flux & error for each band of resolved photometry
# make sure these are in zpt 25 fluxes (that's what
# EAZY expects!)
for im in images:
# make sure photometry was measured well
if not np.max(np.isnan(self.photometry[im])):
f.write('{:.5f}'.format(Fnu25(self.photometry[im][i],
translate[im][0]))+'\t')
f.write('{:.5f}'.format(Fnu25(self.photometry[im+'_err'][i],
translate[im][0])) + '\t')
# otherwise write out no-data-values
else:
f.write('-99\t-99\t')
f.write('\n')
# now we've written the catalog, run EAZY on rest-frame g filter
os.chdir("EAZY")
return_code = subprocess.call("./eazy > 'logLg.log'", shell=True)
os.chdir("..")
# after running EAZY, read in EAZY results
# save them so we have for later plotting
g = np.loadtxt('EAZY/OUTPUT/Lg.157.rf')[:,5]
self.Lg = flux2lum((g * 10**(-29.44)), 3e18/lamg, self.z)
self.LgErr = flux2lum(self.photometry['F160W_err'], translate['F160W'][0], self.z)
# also save L_F160W for fitting
self.LF160 = flux2lum(self.photometry['F160W'], translate['F160W'][0], self.z)
self.LF160Err = flux2lum(self.photometry['F160W_err'], translate['F160W'][0], self.z)
def read_fast(self, IDs, folder, fastOut, grid):
''' Once we've run FAST to calculate the mass in each annulus, need to
actually read in the FAST results! This reads in the chi and scale grid for
all annuli. We'll carry them around with the galaxy object while doing the
analysis, but remove them before saving the file to reduce total size
# -*- coding: utf-8 -*-
"""
Script to perform nuisance regression and timeseries extraction on CBS data
USAGE: python3 nuisRegr.py [--mask] folder
The script first lists all functional runs for which we have EPI data
and a truncated confounds file. Then it applies
nilearn's NiftiMasker to each, which calls signal.clean internally, using
the supplied confounds file. We also perform standardization, spatial
smoothing and high-pass filtering.
Importantly, we only look for run1 and run2 files.
By default, the script uses the brainmask in the fmriprep output
corresponding to the EPI file, change this behavior with the --mask
option, by supplying the mask to be used.
Input arg:
folder: path of data folder
Option --mask, -m:
A path to the mask to be used OR
"nilearnMNI" to use the built-in MNI152 brainmask in nilearn
If --mask is not supplied, the script looks for a brainmask in the
standard fmriprep output form and uses that. Be careful with your own
mask; check that its affine matches the EPI file affines
Output is a saved out np array for extracted and cleaned timeseries for
each functional run
Created on Thu Apr 5 05:53:26 2018
@author: adamb
"""
import argparse
from nilearn import image as nImage
from nilearn import input_data
from nilearn import datasets as nDatasets
import os
import glob
import numpy as np
import pandas as pd
import nibabel as nb
from scipy import io as sio
#%% Params, magic numbers
def params():
# Search terms for glob.glob
# by default we use the images registered to the MNI 152 nonlinear
# 2009c template
epiTerms = ['/**/*run-1_bold_space-'
'MNI152NLin2009cAsym_preproc.nii.gz',
'/**/*run-2_bold_space-'
'MNI152NLin2009cAsym_preproc.nii.gz'
]
brainmaskTerm = 'brainmask.nii.gz'
confoundsTerm = 'confounds_truncated.csv'
switchNo = [-1, -2]
# !! WATCH OUT FOR SETTING THE RIGHT TR VALUE !!
tr = 1.9
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# glue this to the end of the epi file name for the output file
saveFileAdd = '_nuisRegr'
# switch NaN values (if any) in the confound file to this value
NaNValue = 0
# settings for the NiftiMasker object
masker_verbose = 2
masker_cutoff = 0.01 # high-pass filter cutoff
masker_fwhm = 8 # spatial smoothing
masker_stand = True # standardization
# mni153 nonlin 2009c affine when resampled to 3x3x3
expectedAffine = np.array([[3, 0, 0, -96],
[0, 3, 0, -132],
[0, 0, 3, -78],
[0, 0, 0, 1]
])
expectedShape = (65, 77, 65)
# cutoff when we resample the built-in MNI152 brainmask from nilearn
# and have to deal with values between 0 and 1
maskBoolCutoff = 0.1
return [epiTerms, brainmaskTerm, confoundsTerm, switchNo, tr,
saveFileAdd, NaNValue, masker_verbose, masker_cutoff,
masker_fwhm, masker_stand, expectedAffine, expectedShape,
maskBoolCutoff]
#%% Function to list all files in input folder eligible for cleaning
def listFiles(folder, epiTerms, brainmaskTerm, confoundsTerm, switchNo):
'''
List all eligible files by globbing the given pattern and verifying
the existence of corresponding confounds file
'''
# init final result lists
epiFiles = []
maskFiles = []
confFiles = []
# go through all given glob prompts
for term in epiTerms:
tempList = glob.glob(folder + term, recursive=True)
# check each result for brainmask + confounds
for epiFile in tempList:
fileFolder = os.path.dirname(epiFile)
# all files are in nii.gz, need to splitext twice
fileBase = os.path.splitext(os.path.splitext(os.path.basename(epiFile))[0])[0]
# brainmask is only different in the last element of filename
fileBaseParts = fileBase.split('_') # split into BIDS parts
maskFile = '_'.join(fileBaseParts[0:switchNo[0]] +
[brainmaskTerm]) # switch last element
# confounds file is different in last two elements
confFile = '_'.join(fileBaseParts[0:switchNo[1]] +
[confoundsTerm])
# if confound file exists, we append final result lists
if os.path.exists(fileFolder + '/' + confFile):
epiFiles.append(epiFile)
confFiles.append(fileFolder + '/' + confFile)
# maskFiles is appended only if file exists
if os.path.exists(fileFolder + '/' + maskFile):
maskFiles.append(fileFolder + '/' + maskFile)
else:
maskFiles.append('')
return epiFiles, maskFiles, confFiles
#%% function to load MNI brain mask and resample it to affine of our MNI data
def getMask(epiFiles, expectedAffine, expectedShape, maskBoolCutoff):
'''
Function to load built-in nilearn MNI template and resample to
fmriprep MNI output affine (3x3x3 mm and different offsets)
'''
# Sanity check:
# load a sample epi file, get affine, compare with our pre-set one
epiExample = nImage.load_img(epiFiles[0])
if not np.array_equal(expectedAffine, epiExample.affine):
raise ValueError('\nExpected affine does not match affine '
'of first EPI file')
# resample built-in MNI mask to the affine of the fmriprep outputs
maskMNI = nDatasets.load_mni152_brain_mask()
maskMNIResampled = nImage.resample_img(maskMNI,
target_affine=expectedAffine,
target_shape=expectedShape)
# deal with the effects of continuous interpolation, get boolean again
dataMask = maskMNIResampled.get_data() # data part of img
dataMask[dataMask >= maskBoolCutoff] = 1
dataMask[dataMask < maskBoolCutoff] = 0
# new img from changed data
newMaskImg = nb.Nifti1Image(dataMask,
maskMNIResampled.affine,
maskMNIResampled.header)
return newMaskImg
#%% Function to deal with NaNs in confounds cvs
def confoundNaN(file, value):
'''
Function to swap NaN values in confound csv files to something else
'''
# load csv
data = pd.read_csv(file)
# go on if there are NaN values
if data.isnull().values.any():
print('\nSwapping NaN values in', file)
# replace NaN with "value"
data.fillna(value, inplace=True)
# save out results with same name
data.to_csv(file, index=False)
return
#%% Call nilearn and do masking + cleaning + extraction on epi file
def callNilearn(epiFile, maskImg, confFile, tr, saveFileAdd,
masker_verbose, masker_cutoff, masker_fwhm,
masker_stand):
'''
Nuisance regression for given epi input file using a brainmask
'''
# create savepath for cleaned timeseries array
fileFolder = os.path.dirname(epiFile)
fileBase, ext = os.path.splitext(os.path.basename(epiFile))
# if file was .nii.gz, we split again to get basename
if ext == '.gz':
fileBase = os.path.splitext(os.path.basename(fileBase))[0]
savePath = fileFolder + '/' + fileBase + saveFileAdd + '.nii.gz'
# Sanity check - compare EPI affine to brainmask affine
tempImg = nImage.load_img(epiFile)
if not np.array_equal(tempImg.affine, maskImg.affine):
print('WARNING! EPI affine does not match resampled MNI mask '
'affine!\nEPI.affine:', tempImg.affine, '\nMask.affine:',
maskImg.affine)
# create a masker object: spatial smoothing, high pass filtering,
# standardization are all to be performed on masked image
masker = input_data.NiftiMasker(maskImg,
t_r=tr,
verbose=masker_verbose,
high_pass=masker_cutoff,
smoothing_fwhm=masker_fwhm,
standardize=masker_stand)
print('\nCreated masker object. NiftiMasker verbosity set to',
masker_verbose, '\n')
# clean + extract timeseries
tseries = masker.fit_transform(epiFile, confounds=confFile)
print('\nNp array size:', str(tseries.shape))
# transform back to image, save out
cleaned_img = masker.inverse_transform(tseries)
print('\nCleaned img shape:', cleaned_img.header.get_data_shape())
print('\nCleaned img affine:\n', cleaned_img.affine)
# save out img
cleaned_img.to_filename(savePath)
# save out np array into mat file
savematFile = fileFolder + '/' + fileBase + saveFileAdd + '.mat'
sio.savemat(savematFile, {'tseries': tseries})
return savePath
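#%% Illustrative sketch (not part of the original script): the core
# clean-and-extract pattern used in callNilearn(), stripped of the bookkeeping.
# The file names below are hypothetical placeholders.
def cleanEpiSketch(epiPath, maskPath, confoundsCsv, tr=1.9):
    masker = input_data.NiftiMasker(maskPath,
                                    t_r=tr,
                                    high_pass=0.01,
                                    smoothing_fwhm=8,
                                    standardize=True)
    # fit_transform masks the 4D EPI and regresses out the confounds columns,
    # returning a 2D (time x voxel) array
    tseries = masker.fit_transform(epiPath, confounds=confoundsCsv)
    # map the cleaned timeseries back to a 4D Nifti image
    return masker.inverse_transform(tseries)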
#%% Nuisance regression + standardization
def main():
# parse input arguments
parser = argparse.ArgumentParser()
# Input arg "folder"
parser.add_argument(
'folder',
type=str,
help='Folder of CBS data. We use glob to list all eligible '
'epi files before applying nuisance regression and '
'timeseries extraction. Glob is recursive!')
# Input option "--mask"
parser.add_argument(
'-m',
'--mask',
nargs='?',
type=str,
default='',
help='Path to brainmask. This mask will be used for the '
'NiftiMasker object. Default is none, then the script '
'expects a brainmask for every EPI file in fmriprep style.')
# parse arguments, get list
args = parser.parse_args()
# check inputs
if not os.path.exists(args.folder):
raise ValueError('Input arg "folder" is not found')
# only check --mask as a path if it was supplied and is not the special
# 'nilearnMNI' keyword (the default '' means "use the fmriprep brainmask")
if args.mask and args.mask != 'nilearnMNI' and not os.path.exists(args.mask):
raise ValueError('File supplied for --mask is not found')
# start messages
print('\n\nnuisRegr.py was started with input folder',
args.folder)
if args.mask:
if args.mask == 'nilearnMNI':
print('\nWill use nilearn\'s MNI152 mask resampled to 3x3x3')
else:
print('\nWill use the brainmask at', args.mask)
else:
print('\nWill use fmriprep output brainmask for each EPI file')
# load magic numbers / preset parameters
[epiTerms, brainmaskTerm, confoundsTerm,
switchNo, tr, saveFileAdd, NaNValue,
masker_verbose, masker_cutoff,
masker_fwhm, masker_stand,
expectedAffine, expectedShape, maskBoolCutoff] = params()
print('\nLoaded preset parameters. Masker inputs: ',
'\nmasker_verbose = ' + str(masker_verbose) +
';\nmasker_cutoff = ' + str(masker_cutoff) +
';\nmasker_fwhm = ' + str(masker_fwhm) +
';\nmasker_stand = ' + str(masker_stand))
if args.mask == 'nilearnMNI':
print('\nNilearn MNI152 mask affine will be set to\n', expectedAffine)
# list eligible files
epiFiles, maskFiles, confFiles = listFiles(args.folder,
epiTerms,
brainmaskTerm,
confoundsTerm,
switchNo)
print('\nFound', str(len(epiFiles)), 'epi files with '
'corresponding *' + confoundsTerm + ' file.')
# get brainmask
if args.mask == 'nilearnMNI': # if nilearn MNI mask is used
# Create a common MNI brain mask with same parameters as our EPI data
print('\nResampling nilearn\'s MNI152 brainmask to match affine '
'of EPI files. ')
maskImg = getMask(epiFiles,
expectedAffine,
expectedShape,
maskBoolCutoff)
print('Resampling done.')
elif not args.mask: # if we use
ATL Disdik SMPN 3 Halong',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 3 Halong',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN3Paringin',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 3 Paringin',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 3 Paringin',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN4Awayan',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 4 Awayan',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 4 Awayan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN4Batumandi',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 4 Batumandi',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 4 Batumandi',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN4Halong',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 4 Halong',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 4 Halong',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN4Paringin',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 4 Paringin',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 4 Paringin',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN5Halong',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 5 Halong',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 5 Halong',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN5Paringin',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 5 Paringin',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 5 Paringin',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikTebingTinggi',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik Tebing Tinggi',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik Tebing Tinggi',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDishub',
fields=[
],
options={
'verbose_name': '04 Harga ATL Dishub',
'proxy': True,
'verbose_name_plural': '04 Harga ATL Dishub',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisnakertrans',
fields=[
],
options={
'verbose_name': '41 Harga ATL Disnakertrans',
'proxy': True,
'verbose_name_plural': '41 Harga ATL Disnakertrans',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDistamben',
fields=[
],
options={
'verbose_name': '17 Harga ATL Distamben',
'proxy': True,
'verbose_name_plural': '17 Harga ATL Distamben',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDKO',
fields=[
],
options={
'verbose_name': '23 Harga ATL DKO',
'proxy': True,
'verbose_name_plural': '23 Harga ATL DKO',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDKP',
fields=[
],
options={
'verbose_name': '15 Harga ATL DKP',
'proxy': True,
'verbose_name_plural': '15 Harga ATL DKP',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDKUKMP',
fields=[
],
options={
'verbose_name': '16 Harga ATL DKUKMP',
'proxy': True,
'verbose_name_plural': '16 Harga ATL DKUKMP',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDLH',
fields=[
],
options={
'verbose_name': '22 Harga ATL DLH',
'proxy': True,
'verbose_name_plural': '22 Harga ATL DLH',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDPKP',
fields=[
],
options={
'verbose_name': '40 Harga ATL DPKP',
'proxy': True,
'verbose_name_plural': '40 Harga ATL DPKP',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDPMD',
fields=[
],
options={
'verbose_name': '10 Harga ATL DPMD',
'proxy': True,
'verbose_name_plural': '10 Harga ATL DPMD',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDPMPTSP',
fields=[
],
options={
'verbose_name': '18 Harga ATL DPMPTSP',
'proxy': True,
'verbose_name_plural': '18 Harga ATL DPMPTSP',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDPPKB',
fields=[
],
options={
'verbose_name': '42 Harga ATL DPPKB',
'proxy': True,
'verbose_name_plural': '42 Harga ATL DPPKB',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDPPPA',
fields=[
],
options={
'verbose_name': '11 Harga ATL DPPPA',
'proxy': True,
'verbose_name_plural': '11 Harga ATL DPPPA',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDPUPR',
fields=[
],
options={
'verbose_name': '03 Harga ATL DPUPR',
'proxy': True,
'verbose_name_plural': '03 Harga ATL DPUPR',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDukCatPil',
fields=[
],
options={
'verbose_name': '12 Harga ATL DukCatPil',
'proxy': True,
'verbose_name_plural': '12 Harga ATL DukCatPil',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLHalong',
fields=[
],
options={
'verbose_name': '35 Harga ATL Halong',
'proxy': True,
'verbose_name_plural': '35 Harga ATL Halong',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLInspektorat',
fields=[
],
options={
'verbose_name': '20 Harga ATL Inspektorat',
'proxy': True,
'verbose_name_plural': '20 Harga ATL Inspektorat',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLJuai',
fields=[
],
options={
'verbose_name': '33 Harga ATL Juai',
'proxy': True,
'verbose_name_plural': '33 Harga ATL Juai',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLKearsipan',
fields=[
],
options={
'verbose_name': '44 Harga ATL Kearsipan',
'proxy': True,
'verbose_name_plural': '44 Harga ATL Kearsipan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLKehutanan',
fields=[
],
options={
'verbose_name': '14 Harga ATL Kehutanan',
'proxy': True,
'verbose_name_plural': '14 Harga ATL Kehutanan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLKESBANGPOL',
fields=[
],
options={
'verbose_name': '24 Harga ATL KESBANGPOL',
'proxy': True,
'verbose_name_plural': '24 Harga ATL KESBANGPOL',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLKominfo',
fields=[
],
options={
'verbose_name': '43 Harga ATL Kominfo',
'proxy': True,
'verbose_name_plural': '43 Harga ATL Kominfo',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLLampihong',
fields=[
],
options={
'verbose_name': '31 Harga ATL Lampihong',
'proxy': True,
'verbose_name_plural': '31 Harga ATL Lampihong',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLParingin',
fields=[
],
options={
'verbose_name': '28 Harga ATL Paringin',
'proxy': True,
'verbose_name_plural': '28 Harga ATL Paringin',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLParinginKota',
fields=[
],
options={
'verbose_name': '29 Harga ATL Paringin Kota',
'proxy': True,
'verbose_name_plural': '29 Harga ATL Paringin Kota',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLParinginSelatan',
fields=[
],
options={
'verbose_name': '36 Harga ATL Paringin Selatan',
'proxy': True,
'verbose_name_plural': '36 Harga ATL Paringin Selatan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLParinginTimur',
fields=[
],
options={
'verbose_name': '30 Harga ATL Paringin Timur',
'proxy': True,
'verbose_name_plural': '30 Harga ATL Paringin Timur',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLPariwisata',
fields=[
],
options={
'verbose_name': '46 Harga ATL Pariwisata',
'proxy': True,
'verbose_name_plural': '46 Harga ATL Pariwisata',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLPerdagangan',
fields=[
],
options={
'verbose_name': '47 Harga ATL Perdagangan',
'proxy': True,
'verbose_name_plural': '47 Harga ATL Perdagangan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLPerikanan',
fields=[
],
options={
'verbose_name': '45 Harga ATL Perikanan',
'proxy': True,
'verbose_name_plural': '45 Harga ATL Perikanan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLPerpustakaan',
fields=[
],
options={
'verbose_name': '08 Harga ATL Perpustakaan',
'proxy': True,
'verbose_name_plural': '08 Harga ATL Perpustakaan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLPertanian',
fields=[
],
options={
'verbose_name': '13 Harga ATL Pertanian',
'proxy': True,
'verbose_name_plural': '13 Harga ATL Pertanian',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLRSUD',
fields=[
],
options={
'verbose_name': '06 Harga ATL RSUD',
'proxy': True,
'verbose_name_plural': '06 Harga ATL RSUD',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLSATPOLPP',
fields=[
],
options={
'verbose_name': '25 Harga ATL SATPOLPP',
'proxy': True,
'verbose_name_plural': '25 Harga ATL SATPOLPP',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLSekretariatKorpri',
fields=[
],
options={
'verbose_name': '27 Harga ATL Sekretariat Korpri',
'proxy': True,
'verbose_name_plural': '27 Harga ATL Sekretariat Korpri',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLSetda',
fields=[
],
options={
'verbose_name': '02 Harga ATL Setda',
'proxy': True,
'verbose_name_plural': '02 Harga ATL Setda',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLSetwan',
fields=[
],
options={
'verbose_name': '01 Harga ATL Setwan',
'proxy': True,
'verbose_name_plural': '01 Harga ATL Setwan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLSosial',
fields=[
],
options={
'verbose_name': '09 Harga ATL Sosial',
'proxy': True,
'verbose_name_plural': '09 Harga ATL Sosial',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLTebingTinggi',
fields=[
],
options={
'verbose_name': '38 Harga ATL Tebing Tinggi',
'proxy': True,
'verbose_name_plural': '38 Harga ATL Tebing Tinggi',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='KontrakATLAwayan',
fields=[
],
options={
'verbose_name': '34 Kontrak ATL Awayan',
'proxy': True,
'verbose_name_plural': '34 Kontrak ATL Awayan',
},
bases=('atl.kontrakatl',),
),
migrations.CreateModel(
name='KontrakATLBAPPEDA',
fields=[
],
options={
'verbose_name': '21 Kontrak ATL BAPPEDA',
'proxy': True,
'verbose_name_plural': '21 Kontrak ATL BAPPEDA',
},
bases=('atl.kontrakatl',),
),
migrations.CreateModel(
name='KontrakATLBatumandi',
fields=[
],
options={
'verbose_name': '32 Kontrak ATL Batumandi',
'proxy': True,
'verbose_name_plural': '32 Kontrak ATL Batumandi',
},
bases=('atl.kontrakatl',),
),
migrations.CreateModel(
name='KontrakATLBatuPiring',
fields=[
],
options={
'verbose_name': '37 Kontrak ATL Batu Piring',
'proxy': True,
'verbose_name_plural': '37 Kontrak ATL Batu Piring',
},
bases=('atl.kontrakatl',),
),
migrations.CreateModel(
name='KontrakATLBKD',
fields=[
],
options={
'verbose_name': '19 Kontrak ATL BKD',
'proxy': True,
'verbose_name_plural': '19 Kontrak ATL BKD',
},
bases=('atl.kontrakatl',),
),
migrations.CreateModel(
name='KontrakATLBKPPD',
fields=[
],
options={
'verbose_name': '26 Kontrak ATL BKPPD',
'proxy': True,
'verbose_name_plural': '26 Kontrak ATL BKPPD',
},
bases=('atl.kontrakatl',),
),
migrations.CreateModel(
name='KontrakATLBPBD',
fields=[
],
options={
'verbose_name': '39 Kontrak ATL BPBD',
'proxy': True,
'verbose_name_plural': '39 Kontrak ATL BPBD',
},
bases=('atl.kontrakatl',),
),
migrations.CreateModel(
name='KontrakATLBPPD',
fields=[
],
options={
'verbose_name': '48 Kontrak ATL BPPD',
'proxy': True,
'verbose_name_plural': '48 Kontrak ATL BPPD',
},
bases=('atl.kontrakatl',),
),
migrations.CreateModel(
name='KontrakATLDinkes',
fields=[
],
options={
'verbose_name': '05 Kontrak ATL Dinkes',
'proxy': True,
'verbose_name_plural': '05 Kontrak ATL Dinkes',
},
bases=('atl.kontrakatl',),
),
migrations.CreateModel(
name='KontrakATLDisdik',
fields=[
],
options={
'verbose_name': '07 Kontrak ATL Disdik',
'proxy': True,
'verbose_name_plural': '07 Kontrak ATL Disdik',
},
bases=('atl.kontrakatl',),
),
migrations.CreateModel(
name='KontrakATLDishub',
fields=[
],
options={
'verbose_name': '04 Kontrak ATL Dishub',
'proxy': True,
'verbose_name_plural': '04 Kontrak ATL Dishub',
},
bases=('atl.kontrakatl',),
),
migrations.CreateModel(
name='KontrakATLDisnakertrans',
fields=[
],
options={
'verbose_name': '41 Kontrak ATL Disnakertrans',
'proxy': True,
'verbose_name_plural': '41 Kontrak ATL Disnakertrans',
},
bases=('atl.kontrakatl',),
),
migrations.CreateModel(
name='KontrakATLDistamben',
fields=[
],
options={
'verbose_name': '17 Kontrak ATL Distamben',
'proxy': True,
'verbose_name_plural': '17 Kontrak ATL Distamben',
},
bases=('atl.kontrakatl',),
),
migrations.CreateModel(
name='KontrakATLDKO',
fields=[
],
options={
'verbose_name': '23 Kontrak ATL DKO',
'proxy': True,
'verbose_name_plural': '23 Kontrak ATL DKO',
},
bases=('atl.kontrakatl',),
),
migrations.CreateModel(
name='KontrakATLDKP',
fields=[
],
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Base test case class for coverage.py testing."""
import contextlib
import datetime
import difflib
import glob
import os
import os.path
import random
import re
import shlex
import sys
import pytest
from unittest_mixins import EnvironmentAwareMixin, TempDirMixin
import coverage
from coverage import env
from coverage.backunittest import TestCase
from coverage.backward import StringIO, import_local_file, string_class, shlex_quote
from coverage.cmdline import CoverageScript
from tests.helpers import arcs_to_arcz_repr, arcz_to_arcs
from tests.helpers import run_command, SuperModuleCleaner
from tests.mixins import StdStreamCapturingMixin, StopEverythingMixin
# Status returns for the command line.
OK, ERR = 0, 1
# The coverage/tests directory, for all sorts of finding test helping things.
TESTS_DIR = os.path.dirname(__file__)
class CoverageTest(
EnvironmentAwareMixin,
StdStreamCapturingMixin,
TempDirMixin,
StopEverythingMixin,
TestCase,
):
"""A base class for coverage.py test cases."""
# Standard unittest setting: show me diffs even if they are very long.
maxDiff = None
# Tell newer unittest implementations to print long helpful messages.
longMessage = True
# Let stderr go to stderr, pytest will capture it for us.
show_stderr = True
# Temp dirs go to $TMPDIR/coverage_test/*
temp_dir_prefix = "coverage_test/"
if os.getenv('COVERAGE_ENV_ID'):
temp_dir_prefix += "{}/".format(os.getenv('COVERAGE_ENV_ID'))
# Keep the temp directories if the env says to.
# $set_env.py: COVERAGE_KEEP_TMP - Keep the temp directories made by tests.
keep_temp_dir = bool(int(os.getenv("COVERAGE_KEEP_TMP", "0")))
def setUp(self):
super(CoverageTest, self).setUp()
self.module_cleaner = SuperModuleCleaner()
# Attributes for getting info about what happened.
self.last_command_status = None
self.last_command_output = None
self.last_module_name = None
def clean_local_file_imports(self):
"""Clean up the results of calls to `import_local_file`.
Use this if you need to `import_local_file` the same file twice in
one test.
"""
self.module_cleaner.clean_local_file_imports()
def start_import_stop(self, cov, modname, modfile=None):
"""Start coverage, import a file, then stop coverage.
`cov` is started and stopped, with an `import_local_file` of
`modname` in the middle. `modfile` is the file to import as `modname`
if it isn't in the current directory.
The imported module is returned.
"""
cov.start()
try: # pragma: nested
# Import the Python file, executing it.
mod = import_local_file(modname, modfile)
finally: # pragma: nested
# Stop coverage.py.
cov.stop()
return mod
def get_module_name(self):
"""Return a random module name to use for this test run."""
self.last_module_name = 'coverage_test_' + str(random.random())[2:]
return self.last_module_name
def _check_arcs(self, a1, a2, arc_type):
"""Check that the arc lists `a1` and `a2` are equal.
If they are equal, return empty string. If they are unequal, return
a string explaining what is different.
"""
# Make them into multi-line strings so we can see what's going wrong.
s1 = arcs_to_arcz_repr(a1)
s2 = arcs_to_arcz_repr(a2)
if s1 != s2:
lines1 = s1.splitlines(keepends=True)
lines2 = s2.splitlines(keepends=True)
diff = "".join(difflib.ndiff(lines1, lines2))
return "\n" + arc_type + " arcs differ: minus is expected, plus is actual\n" + diff
else:
return ""
def check_coverage(
self, text, lines=None, missing="", report="",
excludes=None, partials="",
arcz=None, arcz_missing="", arcz_unpredicted="",
arcs=None, arcs_missing=None, arcs_unpredicted=None,
):
"""Check the coverage measurement of `text`.
The source `text` is run and measured. `lines` are the line numbers
that are executable, or a list of possible line numbers, any of which
could match. `missing` are the lines not executed, `excludes` are
regexes to match against for excluding lines, and `report` is the text
of the measurement report.
For arc measurement, `arcz` is a string that can be decoded into arcs
in the code (see `arcz_to_arcs` for the encoding scheme).
`arcz_missing` are the arcs that are not executed, and
`arcz_unpredicted` are the arcs executed in the code, but not deducible
from the code. These last two default to "", meaning we explicitly
check that there are no missing or unpredicted arcs.
Returns the Coverage object, in case you want to poke at it some more.
"""
# We write the code into a file so that we can import it.
# Coverage.py wants to deal with things as modules with file names.
modname = self.get_module_name()
self.make_file(modname + ".py", text)
if arcs is None and arcz is not None:
arcs = arcz_to_arcs(arcz)
if arcs_missing is None:
arcs_missing = arcz_to_arcs(arcz_missing)
if arcs_unpredicted is None:
arcs_unpredicted = arcz_to_arcs(arcz_unpredicted)
# Start up coverage.py.
cov = coverage.Coverage(branch=True)
cov.erase()
for exc in excludes or []:
cov.exclude(exc)
for par in partials or []:
cov.exclude(par, which='partial')
mod = self.start_import_stop(cov, modname)
# Clean up our side effects
del sys.modules[modname]
# Get the analysis results, and check that they are right.
analysis = cov._analyze(mod)
statements = sorted(analysis.statements)
if lines is not None:
if isinstance(lines[0], int):
# lines is just a list of numbers, it must match the statements
# found in the code.
assert statements == lines, "{!r} != {!r}".format(statements, lines)
else:
# lines is a list of possible line number lists, one of them
# must match.
for line_list in lines:
if statements == line_list:
break
else:
self.fail("None of the lines choices matched %r" % statements)
missing_formatted = analysis.missing_formatted()
if isinstance(missing, string_class):
msg = "{!r} != {!r}".format(missing_formatted, missing)
assert missing_formatted == missing, msg
else:
for missing_list in missing:
if missing_formatted == missing_list:
break
else:
self.fail("None of the missing choices matched %r" % missing_formatted)
if arcs is not None:
# print("Possible arcs:")
# print(" expected:", arcs)
# print(" actual:", analysis.arc_possibilities())
# print("Executed:")
# print(" actual:", sorted(set(analysis.arcs_executed())))
# TODO: this would be nicer with pytest-check, once we can run that.
msg = (
self._check_arcs(arcs, analysis.arc_possibilities(), "Possible") +
self._check_arcs(arcs_missing, analysis.arcs_missing(), "Missing") +
self._check_arcs(arcs_unpredicted, analysis.arcs_unpredicted(), "Unpredicted")
)
if msg:
assert False, msg
if report:
frep = StringIO()
cov.report(mod, file=frep, show_missing=True)
rep = " ".join(frep.getvalue().split("\n")[2].split()[1:])
assert report == rep, "{!r} != {!r}".format(report, rep)
return cov
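# A minimal usage sketch (hypothetical test body, not part of the original suite):
# all three statements execute, so `missing` stays empty.
#
#   self.check_coverage("""\
#       a = 1
#       if a == 1:
#           b = 2
#       """,
#       lines=[1, 2, 3], missing="")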
@contextlib.contextmanager
def assert_warnings(self, cov, warnings, not_warnings=()):
"""A context manager to check that particular warnings happened in `cov`.
`cov` is a Coverage instance. `warnings` is a list of regexes. Every
regex must match a warning that was issued by `cov`. It is OK for
extra warnings to be issued by `cov` that are not matched by any regex.
Warnings that are disabled are still considered issued by this function.
`not_warnings` is a list of regexes that must not appear in the
warnings. This is only checked if there are some positive warnings to
test for in `warnings`.
If `warnings` is empty, then `cov` is not allowed to issue any
warnings.
"""
saved_warnings = []
def capture_warning(msg, slug=None, once=False): # pylint: disable=unused-argument
"""A fake implementation of Coverage._warn, to capture warnings."""
# NOTE: we don't implement `once`.
if slug:
msg = "%s (%s)" % (msg, slug)
saved_warnings.append(msg)
original_warn = cov._warn
cov._warn = capture_warning
try:
yield
except: # pylint: disable=try-except-raise
raise
else:
if warnings:
for warning_regex in warnings:
for saved in saved_warnings:
if re.search(warning_regex, saved):
break
else:
self.fail("Didn't find warning %r in %r" % (warning_regex, saved_warnings))
for warning_regex in not_warnings:
for saved in saved_warnings:
if re.search(warning_regex, saved):
self.fail("Found warning %r in %r" % (warning_regex, saved_warnings))
else:
# No warnings expected. Raise if any warnings happened.
if saved_warnings:
self.fail("Unexpected warnings: %r" % (saved_warnings,))
finally:
cov._warn = original_warn
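# Hedged usage sketch (hypothetical warning text): every regex in `warnings` must
# match at least one message captured from cov._warn while the block runs.
#
#   with self.assert_warnings(cov, [r"No data was collected"]):
#       ...  # code expected to trigger that warning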
def nice_file(self, *fparts):
"""Canonicalize the file name composed of the parts in `fparts`."""
fname = os.path.join(*fparts)
return os.path.normcase(os.path.abspath(os.path.realpath(fname)))
def assert_same_files(self, flist1, flist2):
"""Assert that `flist1` and `flist2` are the same set of file names."""
flist1_nice = [self.nice_file(f) for f in flist1]
flist2_nice = [self.nice_file(f) for f in flist2]
self.assertCountEqual(flist1_nice, flist2_nice)
def assert_exists(self, fname):
"""Assert that `fname` is a file that exists."""
msg = "File %r should exist" % fname
assert os.path.exists(fname), msg
def assert_doesnt_exist(self, fname):
"""Assert that `fname` is a file that doesn't exist."""
msg = "File %r shouldn't exist" % fname
assert not os.path.exists(fname), msg
def assert_file_count(self, pattern, count):
"""Assert that there are `count` files matching `pattern`."""
files = sorted(glob.glob(pattern))
msg = "There should be {} files matching {!r}, but there are these: {}"
msg = msg.format(count, pattern, files)
assert len(files) == count, msg
def assert_starts_with(self, s, prefix, msg=None):
"""Assert that `s` starts with `prefix`."""
if not s.startswith(prefix):
self.fail(msg or ("%r doesn't start with %r" % (s, prefix)))
def assert_recent_datetime(self, dt, seconds=10, msg=None):
"""Assert that `dt` marks a time at most `seconds` seconds ago."""
age = datetime.datetime.now() - dt
assert age.total_seconds() >= 0, msg
assert age.total_seconds() <= seconds, msg
def command_line(self, args, ret=OK):
"""Run `args` through the command line.
Use this when you want
<filename>setupmeta/model.py
"""
Model of our view on how setup.py + files in a project can come together
"""
import inspect
import io
import os
import re
import sys
import setuptools
import setupmeta.versioning
from setupmeta import listify, MetaDefs, project_path, relative_path, short, temp_resource, trace
from setupmeta.content import find_contents, load_contents, load_list, load_readme, resolved_paths
from setupmeta.license import determined_license
# Used to mark which key/values were provided explicitly in setup.py
EXPLICIT = "explicit"
CLASSIFIERS = "classifiers.txt"
READMES = ["README.rst", "README.md", "README*"]
RE_WORDS = re.compile(r"[^\w]+")
# Accept reasonable variations of name + some separator + email
RE_EMAIL = re.compile(r"(.+)[\s<>()\[\],:;]+([^@]+@[a-zA-Z0-9._-]+)")
# Finds simple values of the form: __author__ = 'Someone'
RE_PY_VALUE = re.compile(r'^__([a-z_]+)__\s*=\s*u?[\'"](.+?)[\'"]\s*(#.+)?$')
# Finds simple docstring entries like: author: <NAME>
RE_DOC_VALUE = re.compile(r"^([a-z_]+)\s*[:=]\s*(.+?)(\s*#.+)?$")
# Beautify short description
RE_DESCRIPTION = re.compile(r"^[\W\s]*((([\w\-]+)\s*[:-])?\s*(.+))$", re.IGNORECASE)
KNOWN_SECTIONS = set("abstract pinned indirect".split())
def first_word(text):
"""
:param str|None text: Text to extract first word from
:return str: Lower case of first word from 'text', if any
"""
if text:
text = text.strip()
if not text:
return text
return text.split()[0].lower()
def is_setup_py_path(path):
""" Is 'path' pointing to a setup.py module? """
if path:
# Accept also setup.pyc
return os.path.basename(path).startswith("setup.py")
def content_type_from_filename(filename):
"""Determined content type from 'filename'"""
if filename:
if filename.endswith(".rst"):
return "text/x-rst"
if filename.endswith(".md"):
return "text/markdown"
return None
class DefinitionEntry:
""" Record of where a definition was found and where it came from """
def __init__(self, key, value, source):
"""
:param str key: Key (for setuptools.setup()) being defined
:param value: Value
:param str source: Source where this definition entry was found
"""
self.key = key
self.value = value
self.source = source
def __repr__(self):
return "%s=%s from %s" % (self.key, short(self.value), self.source)
@property
def is_explicit(self):
""" Did this entry come explicitly from setup(**attrs)? """
return self.source == EXPLICIT
class Definition(object):
""" Record definitions for a given key, and where they were found """
def __init__(self, key):
"""
:param str key: Key being defined
"""
self.key = key
self.value = None
self.sources = [] # type: list[DefinitionEntry]
def __repr__(self):
if len(self.sources) == 1:
source = self.sources[0].source
else:
source = "%s sources" % len(self.sources)
return "%s=%s from %s" % (self.key, short(self.value), source)
def __eq__(self, other):
return isinstance(other, Definition) and self.key is other.key
def __lt__(self, other):
return isinstance(other, Definition) and self.key < other.key
@property
def actual_source(self):
"""Actual source, first non-adjusted source"""
for source in self.sources:
if source.source and not source.source.startswith("auto-"):
return source.source
@property
def source(self):
""" Winning source """
if self.sources:
return self.sources[0].source
@property
def is_explicit(self):
""" Did this entry come explicitly from setup(**attrs)? """
return any(s.is_explicit for s in self.sources)
def merge_sources(self, sources):
""" Record the fact that we saw this definition in 'sources' """
for entry in sources:
if not self.value and entry.value:
self.value = entry.value
trace("[-- %s] %s=%s" % (entry.source, self.key, entry.value))
self.sources.append(entry)
def add(self, value, source, override=False):
"""
:param value: Value to add (first value wins, unless override used)
:param str source: Where this key/value came from
:param bool override: If True, 'value' is forcibly taken
"""
if isinstance(source, list):
self.merge_sources(source)
return
if override or not self.value:
self.value = value
entry = DefinitionEntry(self.key, value, source)
if override:
self.sources.insert(0, entry)
trace("[<- %s] %s=%s" % (source, self.key, short(value)))
else:
self.sources.append(entry)
trace("[-> %s] %s=%s" % (source, self.key, short(value)))
@property
def is_meaningful(self):
""" Should this definition make it to the final setup attrs? """
return bool(self.value) or self.is_explicit
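# Illustrative behavior sketch (hypothetical key and sources): the first value
# recorded for a key wins unless override=True is passed.
#
#   d = Definition("version")
#   d.add("1.0", "setup.py")            # value becomes "1.0"
#   d.add("2.0", "PKG-INFO")            # value stays "1.0"; the source is still recorded
#   d.add("3.0", "git", override=True)  # value becomes "3.0"; this entry is listed first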
class Settings:
""" Collection of key/value pairs with info on where they came from """
def __init__(self):
self.definitions = {} # type: dict[str, Definition]
def __repr__(self):
project_dir = short(MetaDefs.project_dir)
return "%s definitions, %s" % (len(self.definitions), project_dir)
def value(self, key):
""" Value currently associated to 'key', if any """
definition = self.definitions.get(key)
return definition and definition.value
def to_dict(self):
""" Resolved attributes to pass to setuptools """
result = {}
for definition in self.definitions.values():
if definition.is_meaningful:
result[definition.key] = definition.value
return result
def add_definition(self, key, value, source, override=False):
"""
:param str key: Key being defined
:param value: Value to add (first value wins, unless override used)
:param str source: Where this key/value came from
:param bool override: If True, 'value' is forcibly taken
"""
if key and value:
if key in ("keywords", "setup_requires"):
value = listify(value, separator=",")
definition = self.definitions.get(key)
if definition is None:
definition = Definition(key)
self.definitions[key] = definition
definition.add(value, source, override=override)
def merge(self, *others):
""" Merge settings from 'others' """
for other in others:
for definition in other.definitions.values():
self.add_definition(definition.key, definition.value, definition.sources)
class SimpleModule(Settings):
""" Simple settings extracted from a module, such as __about__.py """
def __init__(self, *relative_paths):
"""
:param list(str) relative_paths: Relative path to scan for definitions
"""
Settings.__init__(self)
self.relative_path = os.path.join(*relative_paths)
self.full_path = project_path(*relative_paths)
self.exists = os.path.isfile(self.full_path)
if self.exists:
with io.open(self.full_path, "rt") as fh:
docstring_marker = None
docstring_start = None
docstring = []
line_number = 0
for line in fh:
line_number += 1
line = line.rstrip()
if docstring_marker:
if line.endswith(docstring_marker):
docstring_marker = None
if docstring:
self.scan_docstring(docstring, line_number=docstring_start - 1)
else:
docstring.append(line)
continue
if line.startswith('"""') or line.startswith("'''"):
docstring_marker = line[:3]
if len(line) > 3 and line.endswith(docstring_marker):
# Single docstring line edge case
docstring_marker = None
continue
docstring_start = line_number
docstring.append(line[3:])
continue
self.scan_line(line, RE_PY_VALUE, line_number)
def add_pair(self, key, value, line, **kwargs):
if key and value:
source = self.relative_path
if line:
source = "%s:%s" % (source, line)
self.add_definition(key, value, source, **kwargs)
def scan_docstring(self, lines, line_number=0):
""" Scan docstring for definitions """
if not lines[0]:
# Disregard the 1st empty line, it's very common
lines.pop(0)
line_number += 1
if lines and lines[0]:
if not RE_DOC_VALUE.match(lines[0]):
# Take first non-empty, non key-value line as docstring lead
line = lines.pop(0).rstrip()
line_number += 1
if len(line) > 5 and line[0].isalnum():
self.add_pair("docstring_lead", line, line_number)
if lines and not lines[0]:
# Skip blank line after lead, if any
lines.pop(0)
line_number += 1
for line in lines:
line_number += 1
line = line.rstrip()
if not line or self.scan_line(line, RE_DOC_VALUE, line_number):
# Look at first paragraph after lead only
break
def scan_line(self, line, regex, line_number):
""" Scan 'line' using 'regex', return True if no match found """
m = regex.match(line)
if m:
key = m.group(1)
value = m.group(2)
self.add_pair(key, value, line_number)
return False
return True
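# Illustrative sketch of what SimpleModule picks up (hypothetical __about__.py
# contents): a docstring entry matching RE_DOC_VALUE and a dunder assignment
# matching RE_PY_VALUE both become definitions, each sourced as "path:line".
#
#   """
#   author: Jane Doe
#   """
#   __version__ = "1.2.3"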
def get_pip():
"""We can't assume pip is installed"""
try:
# pip < 10.0
from pip.req import parse_requirements
from pip.download import PipSession
return parse_requirements, PipSession
except ImportError:
pass
try:
# pip >= 10.0
from pip._internal.req import parse_requirements
from pip._internal.download import PipSession
return parse_requirements, PipSession
except ImportError:
pass
try:
# pip >= 19.3
from pip._internal.req import parse_requirements
from pip._internal.network.session import PipSession
return parse_requirements, PipSession
except ImportError:
setupmeta.warn("Can't find PipSession, won't auto-fill requirements")
return None, None
def parse_requirements(requirements):
"""Parse requirements with pip"""
# Note: we can't assume pip is installed
pip_parse_requirements, pip_session = get_pip()
if not pip_parse_requirements or not requirements:
return None, None
reqs = []
links = []
session = pip_session()
try:
if not isinstance(requirements, list):
# Parse given file path as-is (when not abstracting)
for ir in pip_parse_requirements(requirements, session=session):
if ir.link:
if ir.name:
reqs.append(ir.name)
links.append(ir.link.url)
else:
reqs.append(str(ir.req))
return reqs, links
with temp_resource(is_folder=False) as temp:
# Passed list is "complex reqs" that were not abstracted by the simple convention described here:
# https://github.com/zsimic/setupmeta/blob/master/docs/requirements.rst
with open(temp, "wt") as fh:
fh.write("\n".join(requirements))
for ir in pip_parse_requirements(temp, session=session):
if ir.link:
if ir.name:
reqs.append(ir.name)
links.append(ir.link.url)
else:
reqs.append(str(ir.req))
except Exception:
return None, None
return reqs, links
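# Usage sketch (hypothetical inputs): a plain file path is parsed in place, while a
# list of "complex" requirement lines is written to a temp file first. Both paths
# return (names, links); (None, None) is returned when pip is unavailable or parsing fails.
#
#   reqs, links = parse_requirements("requirements.txt")
#   reqs, links = parse_requirements(["foo @ git+https://example.com/foo.git"])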
def is_complex_requirement(line):
"""Allows to save importing pip for very simple requirements.txt files"""
return line and (line.startswith("-") or ":" in line)
def pythonified_name(name):
if name:
words = [s.strip() for s in RE_WORDS.split(name)]
name = "_".join(s for s in words if s)
return name
class PackageInfo:
"""Retrieves info from PKG-INFO"""
_canonical_names = {
"classifier": "classifiers",
"description": "long_description",
"description_content_type": "long_description_content_type",
"home_page": "url",
"summary": "description",
}
_list_types = ["classifiers", "long_description"]
def __init__(self, root):
self.path = os.path.join(root, "PKG-INFO")
self.info = {}
self.name = None
self.dependency_links_txt = None
self.entry_points_txt = None
self.requires_txt = None
lines = load_contents(self.path)
if not lines:
return
# Parse PKG-INFO when present
line_number = 0
key = None
for line in lines.split("\n"):
line_number += 1
if line.startswith(" "):
self.info[key].append(line[8:])
continue
if ": " in line:
key, _, value = line.partition(": ")
key = self.canonical_key(key)
if key is None:
continue
if key in self._list_types:
if key not in self.info:
self.info[key] = []
str(r['x1']) + " y: " + str(r['y1']) + " w: " + str(r['width']) + " h: " + str(r['height'])
if len(r['intersections']) == 0:
txt += " " + _("No intersections")
else:
txt += "\n" + _("Count of intersections: ") + str(len(r['intersections'])) + "\n"
txt += str(r['intersections']) + " " + _("Total: ") + str(sum(r['intersections'])) + " " + _("pixels")
self.ui.textEdit.append(txt)
self.ui.textEdit.append("\n" + _("Intersections Coder: ") + self.selected_coders[1])
for r in res1:
txt = "\n" + "x: " + str(r['x1']) + " y: " + str(r['y1']) + " w: " + str(r['width']) + " h: " + str(r['height'])
if len(r['intersections']) == 0:
txt += " " + _("No intersections")
else:
txt += "\n" + _("Count of intersections: ") + str(len(r['intersections'])) + "\n"
txt += str(r['intersections']) + " " + _("Total: ") + str(sum(r['intersections'])) + " " + _("pixels")
self.ui.textEdit.append(txt)
DialogDualCodedImage(self.app, self.file_, res0, res1).exec_()
def agreement_text_file(self):
""" Calculate the two-coder statistics for this code_
Percentage agreement, disagreement and kappa.
Get the start and end positions in the text file for this cid.
Each character that is coded by coder 1 or coder 2 is incremented, resulting in a list of 0, 1, 2
where 0 is no codings at all, 1 is coded by only one coder and 2 is coded by both coders.
'Disagree%':'','A not B':'','B not A':'','K':''
"""
# coded0 and coded1 are the total characters coded by coder 0 and coder 1
total = {'dual_coded': 0, 'single_coded': 0, 'uncoded': 0, 'characters': 0, 'coded0': 0, 'coded1': 0}
cur = self.app.conn.cursor()
sql = "select fulltext from source where id=?"
cur.execute(sql, [self.file_['id']])
fulltext = cur.fetchone()
if fulltext[0] is None or fulltext[0] == "":
return None
sql = "select pos0,pos1,fid from code_text where fid=? and cid=? and owner=?"
cur.execute(sql, [self.file_['id'], self.code_['cid'], self.selected_coders[0]])
res0 = cur.fetchall()
cur.execute(sql, [self.file_['id'], self.code_['cid'], self.selected_coders[1]])
res1 = cur.fetchall()
# Determine the same characters coded by both coders, by adding 1 to each coded character
char_list = [0] * len(fulltext[0])
# List of which coders coded this char: 'y' = coder 1 only, 'b' = coder 2 only, 'g' = both coders
char_list_coders = [''] * len(fulltext[0])
for coded in res0:
for char in range(coded[0], coded[1]):
char_list[char] += 1
total['coded0'] += 1
char_list_coders[char] = 'y'
for coded in res1:
for char in range(coded[0], coded[1]):
char_list[char] += 1
total['coded1'] += 1
if char_list_coders[char] == 'y':
char_list_coders[char] = 'g'
else:
char_list_coders[char] = 'b'
uncoded = 0
single_coded = 0
dual_coded = 0
for char in char_list:
if char == 0:
uncoded += 1
if char == 1:
single_coded += 1
if char == 2:
dual_coded += 1
total['dual_coded'] += dual_coded
total['single_coded'] += single_coded
total['uncoded'] += uncoded
total['characters'] += len(fulltext[0])
total['agreement'] = round(100 * (total['dual_coded'] + total['uncoded']) / total['characters'], 2)
total['dual_percent'] = round(100 * total['dual_coded'] / total['characters'], 2)
total['uncoded_percent'] = round(100 * total['uncoded'] / total['characters'], 2)
total['disagreement'] = round(100 - total['agreement'], 2)
# Cohen's Kappa
'''
https://en.wikipedia.org/wiki/Cohen%27s_kappa
k = (Po - Pe) / (1 - Pe)
    Po is the proportionate agreement (both coders coded this text / all coded text)
    Pe is the probability of random agreement
Pe = Pyes + Pno
    Pyes = proportion Yes by A multiplied by proportion Yes by B
         = total['coded0'] / total_coded * total['coded1'] / total_coded
    Pno = proportion No by A multiplied by proportion No by B
        = (total_coded - total['coded0']) / total_coded * (total_coded - total['coded1']) / total_coded
IMMEDIATE BELOW IS INCORRECT - RESULTS IN THE TOTAL AGREEMENT SCORE
    Po = total['agreement'] / 100
    Pyes = total['coded0'] / total['characters'] * total['coded1'] / total['characters']
    Pno = (total['characters'] - total['coded0']) / total['characters'] * (total['characters'] - total['coded1']) / total['characters']
BELOW IS BETTER - ONLY LOOKS AT PROPORTIONS OF CODED CHARACTERS
NEED TO CONFIRM THIS IS THE CORRECT APPROACH
'''
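# Worked example of the formula above (hypothetical counts, values rounded to 3 decimals):
# coder A coded 60 characters, coder B coded 50, and 40 of those overlap, so
#   unique_codings = 60 + 50 - 40 = 70
#   Po    = 40/70               = 0.571
#   Pyes  = (60/70) * (50/70)   = 0.612
#   Pno   = (10/70) * (20/70)   = 0.041
#   Pe    = Pyes + Pno          = 0.653
#   kappa = (Po - Pe) / (1 - Pe) = -0.235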
total['kappa'] = "zerodiv"
unique_codings = 0
try:
unique_codings = total['coded0'] + total['coded1'] - total['dual_coded']
Po = total['dual_coded'] / unique_codings
Pyes = total['coded0'] / unique_codings * total['coded1'] / unique_codings
Pno = (unique_codings - total['coded0']) / unique_codings * (unique_codings - total['coded1']) / unique_codings
Pe = Pyes + Pno  # chance agreement is the sum of the per-category products (see docstring above)
kappa = round((Po - Pe) / (1 - Pe), 4)
total['kappa'] = kappa
except ZeroDivisionError:
msg_ = _("ZeroDivisionError. unique_codings:") + str(unique_codings)
logger.debug(msg_)
overall = "\nOVERALL SUMMARY\n"
overall += _("Total characters: ") + str(total['characters']) + ", "
overall += _("Dual coded: ") + str(total['dual_coded']) + ", "
overall += _("Single coded: ") + str(total['single_coded']) + ", "
overall += _("Uncoded: ") + str(total['uncoded']) + ", "
overall += _("Coder 0: ") + str(total['coded0']) + ", "
overall += _("Coder 1: ") + str(total['coded1']) + "\n"
overall += _("Agreement between coders: ") + str(total['agreement']) + "%\n"
overall += _("Total text dual coded: ") + str(total['dual_percent']) + "%, "
overall += _("Total text uncoded: ") + str(total['uncoded_percent']) + "%, "
overall += _("Total text disagreement (single coded): ") + str(total['disagreement']) + "%\n"
overall += _("Kappa: ") + str(total['kappa']) + "\n\n"
overall += "FULLTEXT"
self.ui.textEdit.append(overall)
cursor = self.ui.textEdit.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
pos = cursor.position()
self.ui.textEdit.append(fulltext[0])
# Apply brush, yellow for coder 1, blue for coder 2 and green for dual coded
cursor = self.ui.textEdit.textCursor()
fmt = QtGui.QTextCharFormat()
# Foreground depends on the defined need_white_text color in color_selector
for i, c in enumerate(char_list_coders):
if c == 'b':
cursor.setPosition(pos + i, QtGui.QTextCursor.MoveAnchor)
cursor.setPosition(pos + i + 1, QtGui.QTextCursor.KeepAnchor)
color = "#81BEF7"
brush = QBrush(QtGui.QColor(color))
fmt.setBackground(brush)
text_brush = QBrush(QtGui.QColor(TextColor(color).recommendation))
fmt.setForeground(text_brush)
cursor.setCharFormat(fmt)
if c == 'g':
cursor.setPosition(pos + i, QtGui.QTextCursor.MoveAnchor)
cursor.setPosition(pos + i + 1, QtGui.QTextCursor.KeepAnchor)
color = "#81F781"
brush = QBrush(QtGui.QColor(color))
fmt.setBackground(brush)
text_brush = QBrush(QtGui.QColor(TextColor(color).recommendation))
fmt.setForeground(text_brush)
cursor.setCharFormat(fmt)
if c == 'y':
cursor.setPosition(pos + i, QtGui.QTextCursor.MoveAnchor)
cursor.setPosition(pos + i + 1, QtGui.QTextCursor.KeepAnchor)
color = "#F4FA58"
brush = QBrush(QtGui.QColor(color))
fmt.setBackground(brush)
text_brush = QBrush(QtGui.QColor(TextColor(color).recommendation))
fmt.setForeground(text_brush)
cursor.setCharFormat(fmt)
def fill_tree(self):
""" Fill tree widget, top level items are main categories and unlinked codes. """
cats = copy(self.categories)
codes = copy(self.codes)
self.ui.treeWidget.clear()
self.ui.treeWidget.setColumnCount(2)
self.ui.treeWidget.setHeaderLabels([_("Code Tree"), "Id"])
self.ui.treeWidget.hideColumn(1)
if self.app.settings['showids'] == 'True':
self.ui.treeWidget.showColumn(1)
self.ui.treeWidget.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
self.ui.treeWidget.header().setStretchLastSection(False)
# Add top level categories
remove_list = []
for c in cats:
if c['supercatid'] is None:
top_item = QtWidgets.QTreeWidgetItem([c['name'], 'catid:' + str(c['catid'])])
self.ui.treeWidget.addTopLevelItem(top_item)
remove_list.append(c)
for item in remove_list:
cats.remove(item)
''' Add child categories. Look at each unmatched category, iterate through tree to
add as child then remove matched categories from the list. '''
count = 0
while len(cats) > 0 and count < 10000:
remove_list = []
for c in cats:
it = QtWidgets.QTreeWidgetItemIterator(self.ui.treeWidget)
item = it.value()
while item: # while there is an item in the list
if item.text(1) == 'catid:' + str(c['supercatid']):
child = QtWidgets.QTreeWidgetItem([c['name'], 'catid:' + str(c['catid'])])
item.addChild(child)
remove_list.append(c)
it += 1
item = it.value()
for item in remove_list:
cats.remove(item)
count += 1
# Add unlinked codes as top level items
remove_items = []
for c in codes:
if c['catid'] is None:
top_item = QtWidgets.QTreeWidgetItem([c['name'], 'cid:' + str(c['cid'])])
top_item.setBackground(0, QBrush(QtGui.QColor(c['color']), Qt.SolidPattern))
color = TextColor(c['color']).recommendation
top_item.setForeground(0, QBrush(QtGui.QColor(color)))
top_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)
self.ui.treeWidget.addTopLevelItem(top_item)
remove_items.append(c)
for item in remove_items:
codes.remove(item)
# Add codes as children
for c in codes:
it = QtWidgets.QTreeWidgetItemIterator(self.ui.treeWidget)
item = it.value()
while item:
if item.text(1) == 'catid:' + str(c['catid']):
child = QtWidgets.QTreeWidgetItem([c['name'], 'cid:' + str(c['cid'])])
child.setBackground(0, QBrush(QtGui.QColor(c['color']), Qt.SolidPattern))
color = TextColor(c['color']).recommendation
child.setForeground(0, QBrush(QtGui.QColor(color)))
child.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)
item.addChild(child)
c['catid'] = -1 # make unmatchable
it += 1
item = it.value()
self.ui.treeWidget.expandAll()
class DialogDualCodedImage(QtWidgets.QDialog):
""" View two coders coded sections for one code in original image.
Called by: report_compare_coder_file.DialogCompareCoderByFile.
"""
app = None
img = None
coded0 = None
coded1 = None
pixmap = None
label = None
scale = None
scene = None
def __init__(self, app, img, coded0, coded1, parent=None):
""" Displays dialog with two coders image codings for selected code.
param:
app : class containing app details such as database connection
img contains {id, name, memo, mediapath, type:image}
coded0 and coded1 contain: {x1, y1, width, height, area}
mediapath may be a link as: 'images:path'
"""
sys.excepthook = exception_handler
self.app = app
self.img = img
self.coded0 = coded0
self.coded1 = coded1
self.scale = 1
QtWidgets.QDialog.__init__(self)
self.ui = Ui_Dialog_code_context_image()
import json
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Callable, Dict, List, Optional, Tuple
from collections import defaultdict
import numpy as np
import torch
from seq2seq_trainer import Seq2SeqTrainer
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
BartTokenizer,
EvalPrediction,
HfArgumentParser,
MBartTokenizer,
T5Tokenizer,
TrainingArguments,
set_seed,
)
from transformers.modeling_bart import shift_tokens_right
from utils import (
LegacySeq2SeqDataset,
Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
freeze_params,
lmap,
trim_batch,
use_task_specific_params,
)
logger = logging.getLogger(__name__)
class Seq2SeqDataCollator:
def __init__(self, tokenizer, data_args, tpu_num_cores=None):
self.tokenizer = tokenizer
self.pad_token_id = tokenizer.pad_token_id
self.data_args = data_args
self.tpu_num_cores = tpu_num_cores
self.add_prefix_space = isinstance(tokenizer, BartTokenizer)
def __call__(self, batch) -> Dict[str, torch.Tensor]:
if hasattr(self.tokenizer, "prepare_seq2seq_batch"):
batch = self._encode(batch)
input_ids, attention_mask, labels = (
batch["input_ids"],
batch["attention_mask"],
batch["labels"],
)
else:
input_ids = torch.stack([x["input_ids"] for x in batch])
attention_mask = torch.stack([x["attention_mask"] for x in batch])
labels = torch.stack([x["labels"] for x in batch])
labels = trim_batch(labels, self.pad_token_id)
input_ids, attention_mask = trim_batch(input_ids, self.pad_token_id, attention_mask=attention_mask)
if isinstance(self.tokenizer, T5Tokenizer):
decoder_input_ids = self._shift_right_t5(labels)
labels = labels
else:
decoder_input_ids = shift_tokens_right(labels, self.pad_token_id)
labels = labels
batch = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"labels": labels,
}
return batch
def _shift_right_t5(self, input_ids):
decoder_start_token_id = self.pad_token_id
assert (
decoder_start_token_id is not None
), "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information"
# shift inputs to the right
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
return shifted_input_ids
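# Shape sketch (hypothetical token ids, assuming pad_token_id == 0 as T5 uses it
# for the decoder start token, per the assertion above):
#   labels            = [[8774,    6, 1]]
#   decoder_input_ids = [[   0, 8774, 6]]   # labels shifted right by one position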
def _encode(self, batch) -> Dict[str, torch.Tensor]:
batch_encoding = self.tokenizer.prepare_seq2seq_batch(
[x["src_texts"] for x in batch],
# src_lang=self.data_args.src_lang,
tgt_texts=[x["tgt_texts"] for x in batch],
# tgt_lang=self.data_args.tgt_lang,
max_length=self.data_args.max_source_length,
max_target_length=self.data_args.max_target_length,
padding="max_length" if self.tpu_num_cores is not None else "longest", # TPU hack
return_tensors="pt",
# add_prefix_space=self.add_prefix_space,
)
return batch_encoding.data
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
"""
Parameters:
label_smoothing (:obj:`float`, `optional`, defaults to 0):
The label smoothing epsilon to apply (if not zero).
sortish_sampler (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use SortishSampler or not. It sorts the inputs according to lengths in order to minimize the padding size.
predict_with_generate (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use generate to calculate generative metrics (ROUGE, BLEU).
"""
label_smoothing: Optional[float] = field(
default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
)
sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
predict_with_generate: bool = field(
default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
trigram_loss: bool = field(default=False, metadata={"help": "Whether to add trigram penalty during loss calculation."})
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
data_dir: str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
)
task: Optional[str] = field(
default="summarization",
metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_target_length: Optional[int] = field(
default=600,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int] = field(
default=600,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
test_max_target_length: Optional[int] = field(
default=600,
metadata={
"help": "The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
def get_freq_sequences(data_dir, tokenizer):
big_map = defaultdict(int)
with open(os.path.join(data_dir, "train.target"), 'r', encoding='utf-8') as f:
for paragraph in f.readlines():
words = paragraph.split(' ')
for i in range(0, len(words)-2):
li = words[i:i+3]
has_num = False
for tok in li:
num = ''
try:
num = int(tok)
except:
try:
num = text2num(tok)
except:
pass
if isinstance(num, int):
has_num = True
if not has_num:
current_seq = ' '.join(li)
big_map[current_seq] += 1
tokens = tokenizer.batch_encode_plus(
[k for k, v in sorted(big_map.items(), key=lambda item: item[1], reverse=True)][:75],
return_tensors='pt',
padding='max_length'
)
return {tuple(x[1:3].tolist()): x[4] for x in tokens['input_ids']}
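# Note (a sketch of the assumed intent, based on the --trigram_loss flag): the dict
# built above maps a pair of token ids from each of the 75 most frequent number-free
# trigrams in train.target to a later token id of the same trigram, so the trainer
# can penalise the model when it reproduces these memorised sequences.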
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
model = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
from_tf=".ckpt" in model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
)
tokenizer.add_special_tokens({'additional_special_tokens': [
'<|HOME|>', '<|AWAY|>',
'<|PLAYER-START_POSITION|>', '<|PLAYER-MIN|>', '<|PLAYER-PTS|>', '<|PLAYER-FGM|>', '<|PLAYER-FGA|>', '<|PLAYER-FG_PCT|>', '<|PLAYER-FG3M|>', '<|PLAYER-FG3A|>', '<|PLAYER-FG3_PCT|>', '<|PLAYER-FTM|>', '<|PLAYER-FTA|>', '<|PLAYER-FT_PCT|>', '<|PLAYER-OREB|>', '<|PLAYER-DREB|>', '<|PLAYER-REB|>', '<|PLAYER-AST|>', '<|PLAYER-TO|>', '<|PLAYER-STL|>', '<|PLAYER-BLK|>', '<|PLAYER-PF|>',
'<|TEAM-PTS_QTR1|>', '<|TEAM-PTS_QTR2|>', '<|TEAM-PTS_QTR3|>', '<|TEAM-PTS_QTR4|>', '<|TEAM-PTS|>', '<|TEAM-FG_PCT|>', '<|TEAM-FG3_PCT|>', '<|TEAM-FT_PCT|>', '<|TEAM-REB|>', '<|TEAM-AST|>', '<|TEAM-TOV|>', '<|TEAM-WINS|>', '<|TEAM-LOSSES|>', '<|TEAM-CITY|>', '<|TEAM-NAME|>',
]})
# tokenizer.model_max_length = 1300
# tokenizer.max_length = 1300
print(tokenizer.model_max_length)
model.resize_token_embeddings(len(tokenizer))
freq_seqs = get_freq_sequences(data_args.data_dir, tokenizer) if model_args.trigram_loss else None
# use task specific params
use_task_specific_params(model, data_args.task)
# set num_beams for evaluation
if data_args.eval_beams is not None:
model.config.num_beams = data_args.eval_beams
assert model.config.num_beams >= 1, f"got eval_beams={model.config.num_beams}. Need an integer >= 1"
# set max length for generation
model.config.max_generate_length = data_args.val_max_target_length
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(tokenizer, MBartTokenizer):
decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
model.config.decoder_start_token_id = decoder_start_token_id
def build_compute_metrics_fn(task_name: str) -> Callable[[EvalPrediction], Dict]:
def non_pad_len(tokens: np.ndarray) -> int:
return np.count_nonzero(tokens != tokenizer.pad_token_id)
def decode_pred(pred: EvalPrediction) -> Tuple[List[str], List[str]]:
pred_str = tokenizer.batch_decode(pred.predictions, skip_special_tokens=True)
label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)
pred_str = lmap(str.strip, pred_str)
label_str = lmap(str.strip, label_str)
return pred_str, label_str
def summarization_metrics(pred: EvalPrediction) -> Dict:
pred_str, label_str = decode_pred(pred)
rouge: Dict = calculate_rouge(pred_str, label_str)
summ_len = np.mean(lmap(non_pad_len, pred.predictions))
rouge.update({"gen_len": summ_len})
return rouge
def translation_metrics(pred: EvalPrediction) -> Dict:
pred_str, label_str = decode_pred(pred)
bleu: Dict = calculate_bleu(pred_str, label_str)
gen_len = np.mean(lmap(non_pad_len, pred.predictions))
bleu.update({"gen_len": gen_len})
return bleu
compute_metrics_fn = summarization_metrics if "summarization" in task_name else translation_metrics
return compute_metrics_fn
def freeze_embeds(model: torch.nn.Module):
"""Freeze token embeddings and positional embeddings for bart, just token embeddings for t5."""
try:
freeze_params(model.model.shared)
for d in [model.model.encoder, model.model.decoder]:
freeze_params(d.embed_positions)
freeze_params(d.embed_tokens)
except AttributeError:
freeze_params(model.shared)
for d in [model.encoder, model.decoder]:
freeze_params(d.embed_tokens)
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 25 17:11:46 2020
@author: Mahdi
"""
from airsim_client import *
from DQN_model import DQNModel
import time
import numpy as np
import json
import math
import os
import datetime
import sys
import copy
import pandas as pd
# A class that represents the agent that will drive the vehicle, train the model.
class Agent():
def __init__(self, parameters):
required_parameters = ['data_dir', 'max_episode_runtime_sec', 'replay_memory_size', 'batch_size', 'min_epsilon', 'per_iter_epsilon_reduction', 'train_conv_layers']
for required_parameter in required_parameters:
if required_parameter not in parameters:
print(required_parameter)
raise ValueError('Missing required parameter {0}'.format(required_parameter))
print('Starting time: {0}'.format(datetime.datetime.utcnow()), file=sys.stderr)
self.__model_buffer = None
self.__model = None
self.__airsim_started = False
self.__data_dir = parameters['data_dir']
self.__per_iter_epsilon_reduction = float(parameters['per_iter_epsilon_reduction'])
self.__min_epsilon = float(parameters['min_epsilon'])
self.__max_episode_runtime_sec = float(parameters['max_episode_runtime_sec'])
self.__replay_memory_size = int(parameters['replay_memory_size'])
self.__batch_size = int(parameters['batch_size'])
self.train_from_json = bool((parameters['train_from_json'].lower().strip() == 'true'))
self.__train_conv_layers = bool((parameters['train_conv_layers'].lower().strip() == 'true'))
self.__epsilon = 0.05 if self.train_from_json else 1
self.__num_batches_run = 0
self.__num_episodes = 0
self.__last_checkpoint_batch_count = 0
if 'batch_update_frequency' in parameters:
self.__batch_update_frequency = int(parameters['batch_update_frequency'])
if 'weights_path' in parameters:
self.__weights_path = parameters['weights_path']
else:
self.__weights_path = None
self.__car_client = None
self.__car_controls = None
self.__experiences = {}
self.__experiences['pre_states'] = []
self.__experiences['post_states'] = []
self.__experiences['actions'] = []
self.__experiences['rewards'] = []
self.__experiences['predicted_rewards'] = []
self.__experiences['is_not_terminal'] = []
self.__init_road_points()
self.__init_reward_points()
# Starts the agent
def start(self):
self.__run_function()
# The function that will be run during training.
# start AirSim, and continuously run training iterations.
def __run_function(self):
print('Starting run function')
if self.train_from_json :
MODEL_FILENAME = None # 'D:/Documents/spyder/DQNProject/DataDir/checkpoint/run/m3000_far15_act5_0_0.5_0.9 (5).json' #Your model goes here
# load the model from disk
print('Receiving Model from JSON')
self.__model = DQNModel(None, self.__train_conv_layers)
with open(MODEL_FILENAME, 'r') as f:
checkpoint_data = json.loads(f.read())
self.__model.from_packet(checkpoint_data['model'])
else:
self.__model = DQNModel(self.__weights_path, self.__train_conv_layers)
# Connect to the AirSim exe
self.__connect_to_airsim()
# Fill the replay memory by driving randomly.
print('Filling replay memory...')
while True:
print('Running Airsim episode.')
try:
if self.train_from_json :
self.__run_airsim_episode(False)
else:
self.__run_airsim_episode(True)
percent_full = 100.0 * len(self.__experiences['actions'])/self.__replay_memory_size
print('Replay memory now contains {0} members. ({1}% full)'.format(len(self.__experiences['actions']), percent_full))
if (percent_full >= 100.0):
break
except msgpackrpc.error.TimeoutError:
print('Lost connection to AirSim while filling replay memory. Attempting to reconnect.')
self.__connect_to_airsim()
# Get the latest model. Other agents may have finished before us.
print('Replay memory filled. Starting main loop...')
while True:
try:
if (self.__model is not None):
#Generate a series of training examples by driving the vehicle in AirSim
print('Running Airsim episode.')
self.__num_episodes += 1
experiences, action_count = self.__run_airsim_episode(False)
# If we didn't immediately crash, train on the gathered experiences
if (action_count > 0):
print('Generating {0} minibatches...'.format(action_count))
print('Sampling Experiences.')
# Sample experiences from the replay memory
sampled_experiences = self.__sample_experiences(experiences, action_count, False)
self.__num_batches_run += action_count
# If we successfully sampled, train on the collected minibatches.
if (len(sampled_experiences) > 0):
print('Publishing AirSim episode.')
self.__publish_batch_and_update_model(sampled_experiences, action_count)
# Occasionally, the AirSim exe will stop working.
# For example, if a user connects to the node to visualize progress.
# In that case, attempt to reconnect.
except msgpackrpc.error.TimeoutError:
print('Lost connection to AirSim. Attempting to reconnect.')
self.__connect_to_airsim()
# Connects to the AirSim Exe.
# Assumes it is already running; keeps retrying until a connection is established.
def __connect_to_airsim(self):
attempt_count = 0
while True:
try:
print('Attempting to connect to AirSim (attempt {0})'.format(attempt_count))
self.__car_client = CarClient()
self.__car_client.confirmConnection()
self.__car_client.enableApiControl(True)
self.__car_controls = CarControls()
print('Connected!')
return
except:
print('Failed to connect.')
# Appends a sample to a ring buffer.
# If the appended example takes the size of the buffer over buffer_size, the example at the front will be removed.
def __append_to_ring_buffer(self, item, buffer, buffer_size):
if (len(buffer) >= buffer_size):
buffer = buffer[1:]
buffer.append(item)
return buffer
# Runs an iteration of data generation from AirSim.
# Data will be saved in the replay memory.
def __run_airsim_episode(self, always_random):
print('Running AirSim episode.')
# Pick a random starting point on the roads
starting_points, starting_direction = self.__get_next_starting_point()
# Initialize the state buffer.
# For now, save the last 4 images, sampled at wait_delta_sec intervals.
state_buffer_len = 4
state_buffer = []
wait_delta_sec = 0.03
print('Getting Pose')
self.__car_client.simSetPose(Pose(Vector3r(starting_points[0], starting_points[1], starting_points[2]), AirSimClientBase.toQuaternion(starting_direction[0], starting_direction[1], starting_direction[2])), True)
# Currently, simSetPose does not allow us to set the velocity.
# So, if we crash and call simSetPose, the car will be still moving at its previous velocity.
# We need the car to stop moving, so push the brake and wait for a few seconds.
print('Waiting for momentum to die')
self.__car_controls.steering = 0
self.__car_controls.throttle = 0
self.__car_controls.brake = 1
self.__car_client.setCarControls(self.__car_controls)
time.sleep(3)
print('Resetting')
self.__car_client.simSetPose(Pose(Vector3r(starting_points[0], starting_points[1], starting_points[2]), AirSimClientBase.toQuaternion(starting_direction[0], starting_direction[1], starting_direction[2])), True)
#Start the car rolling so it doesn't get stuck
print('Running car for a few seconds...')
self.__car_controls.steering = 0
self.__car_controls.throttle = 0.5
self.__car_controls.brake = 0
self.__car_client.setCarControls(self.__car_controls)
# While the car is rolling, start initializing the state buffer
stop_run_time =datetime.datetime.now() + datetime.timedelta(seconds=2)
while(datetime.datetime.now() < stop_run_time):
time.sleep(wait_delta_sec)
state_buffer = self.__append_to_ring_buffer(self.__get_image(), state_buffer, state_buffer_len)
done = False
actions = []
rewards = []
episode_dur = 0
car_state = self.__car_client.getCarState()
start_time = datetime.datetime.utcnow()
end_time = start_time + datetime.timedelta(seconds=self.__max_episode_runtime_sec)
num_random = 0
far_off = False
rst = False
# Main data collection loop
while not done:
collision_info = self.__car_client.getCollisionInfo()
utc_now = datetime.datetime.utcnow()
# Check for terminal conditions:
# 1) Car has collided
# 2) Car is stopped
# 3) The run has been running for longer than max_episode_runtime_sec.
# This constraint is so the model doesn't end up having to process huge chunks of data, slowing down training
# 4) The car has run off the road
if (collision_info.has_collided or car_state.speed < 1 or utc_now > end_time or rst):
print('Start time: {0}, end time: {1}'.format(start_time, utc_now), file=sys.stderr)
if (utc_now > end_time):
print('timed out.')
print('Full autonomous run finished at {0}'.format(utc_now), file=sys.stderr)
done = True
episode_dur = utc_now - start_time
sys.stderr.flush() # "flush" the buffer to the terminal
else:
# The Agent should occasionally pick random action instead of best action
do_greedy = np.random.random_sample()
# pre_state should have the last 4 images in a list format
pre_state = copy.deepcopy(state_buffer) # copy recursively
if (do_greedy < self.__epsilon or always_random):
num_random += 1
action_of_pre_state = self.__model.get_random_action()
predicted_reward = 0
print('Model randomly pick action {0}'.format(action_of_pre_state))
else:
action_of_pre_state, predicted_reward = self.__model.predict_action_and_reward(pre_state)
print('Model predicts action {0}'.format(action_of_pre_state))
# Convert the selected state to a control signal
next_control_signals = self.__model.state_to_control_signals(action_of_pre_state, self.__car_client.getCarState())
# Take the action
self.__car_controls.steering = next_control_signals[0]
self.__car_controls.throttle = next_control_signals[1]
self.__car_controls.brake = next_control_signals[2]
self.__car_client.setCarControls(self.__car_controls)
# Wait for a short period of time to see outcome
time.sleep(wait_delta_sec)
# Observe outcome and compute reward from action
state_buffer = self.__append_to_ring_buffer(self.__get_image(), state_buffer, state_buffer_len)
car_state = self.__car_client.getCarState()
collision_info = self.__car_client.getCollisionInfo()
# Procedure to compute distance reward
#Get the car position
position_key = bytes('position', encoding='utf8')
x_val_key = bytes('x_val', encoding='utf8')
y_val_key = bytes('y_val', encoding='utf8')
car_point = np.array([car_state.kinematics_true[position_key][x_val_key], car_state.kinematics_true[position_key][y_val_key], 0])
# Distance component is exponential distance to nearest line
distance = 999
#Compute the distance to the nearest center line
for line in self.__reward_points: # e.g: (-251.21722656 -209.60329102 0.0 , -132.21722656 -209.60329102 0.0 )
local_distance = 0
length_squared = ((line[0][0]-line[1][0])**2) + ((line[0][1]-line[1][1])**2) # e.g: (X1-X2)^2 + (Y1-Y2)^2 = 14161.0
if (length_squared != 0):
# calc the projected point of the car_point on the road center line
t = max(0, min(1, np.dot(car_point-line[0], line[1]-line[0]) / length_squared)) # e.g: np.dot( (car_point - (X1,Y1,Z1)) , [119. 0. 0.]) / 14161.0
proj = line[0] + (t * (line[1]-line[0])) # if t=0 -> proj=(X1,Y1) if t=1 -> proj=(X2,Y2) proj = (X1,Y1) + t*[119. 0. 0.]
# calc the car_point distance from its projection on the road center line
local_distance = np.linalg.norm(proj - car_point)
distance = min(local_distance, distance)
distance_reward = math.exp(-(distance * DISTANCE_DECAY_RATE))
far_off = distance > THRESH_DIST_far
rst = distance > THRESH_DIST_rst
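# Worked example of the projection above (hypothetical numbers): for the segment
# (-251.2, -209.6) -> (-132.2, -209.6) and a car at (-200.0, -205.0),
# t = 6092.8 / 14161 = 0.43 (already inside [0, 1]), proj = (-200.0, -209.6),
# so local_distance = 4.6 and distance_reward = exp(-4.6 * DISTANCE_DECAY_RATE).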
reward = self.__compute_reward(collision_info, car_state, action_of_pre_state, distance_reward, far_off)
# Add the experience to the set of examples from this iteration
# Add the list of each kind of data from this iteration to the replay memory
# self.__add_to_replay_memory('pre_states') = [iter1,iter2,iter3,...]
self.__add_to_replay_memory('pre_states', pre_state)
self.__add_to_replay_memory('post_states', state_buffer)
actions.append(action_of_pre_state)
self.__add_to_replay_memory('actions', action_of_pre_state)
rewards.append(reward)
self.__add_to_replay_memory('rewards', reward)
self.__add_to_replay_memory('predicted_rewards', predicted_reward)
self.__add_to_replay_memory('is_not_terminal', 1)
# Only the last state is a terminal state.
self.__experiences['is_not_terminal'][-1] = 0
# is_not_terminal = [1 for i in range(0, len(actions)-1, 1)]
amp_norm --- do a normalization for variable amplitude. Useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(x),100,.01,-np.pi,0,-np.inf,-np.inf,np.min(x)],[np.max(x),200000,1,np.pi,5,np.inf,np.inf,np.max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default initial guess
print("default initial guess used")
fr_guess = x[np.argmin(np.abs(z))]
#x0 = [fr_guess,10000.,0.5,0,0,np.abs(z[0])**2,np.abs(z[0])**2,fr_guess]
x0 = guess_x0_mag_nonlinear(x,z,verbose = True)
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,bounds = bounds)
fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
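# Minimal usage sketch (hypothetical sweep arrays): x is the frequency axis and z the
# complex transmission; the fit is performed on |z|**2 with the nonlinear_mag model and
# the 8 fitted parameters come back in result['fit'][0].
#
#   result = fit_nonlinear_mag(x, z)
#   fr, Qr, amp, phi, a, b0, b1, flin = result['fit'][0]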
def fit_nonlinear_mag_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
'''
# same as above but fine and gain scans are provided separately
# keywords are
# bounds ---- a 2d tuple of the low and high values to bound the problem by
# x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
# amp_norm --- do a normalization for variable amplitude. Useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(fine_x),100,.01,-np.pi,0,-np.inf,-np.inf,np.min(fine_x)],[np.max(fine_x),1000000,100,np.pi,5,np.inf,np.inf,np.max(fine_x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default initial guess
print("default initial guess used")
x0 = guess_x0_mag_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
use_err = True
fine_z_err = keywords['fine_z_err']
gain_z_err = keywords['gain_z_err']
else:
use_err = False
#stack the scans for curvefit
x = np.hstack((fine_x,gain_x))
z = np.hstack((fine_z,gain_z))
if use_err:
z_err = np.hstack((fine_z_err,gain_z_err))
z_err = np.sqrt(4*np.real(z_err)**2*np.real(z)**2+4*np.imag(z_err)**2*np.imag(z)**2) #propagation of errors; cross term left out
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,sigma = z_err,bounds = bounds)
else:
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,bounds = bounds)
fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
#compute reduced chi squared
print(len(z))
if use_err:
#red_chi_sqr = np.sum((np.abs(z)**2-fit_result)**2/z_err**2)/(len(z)-7.)
# only use fine scan for reduced chi squared.
red_chi_sqr = np.sum((np.abs(fine_z)**2-fit_result[0:len(fine_z)])**2/z_err[0:len(fine_z)]**2)/(len(fine_z)-7.)
#make a dictionary to return
if use_err:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
else:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
return fit_dict
def amplitude_normalization(x,z):
'''
# normalize the amplitude variation; requires a gain scan
# flag frequencies to use in amplitude normalization
'''
index_use = np.where(np.abs(x-np.median(x))>100000) #100kHz away from resonator
poly = np.polyfit(x[index_use],np.abs(z[index_use]),2)
poly_func = np.poly1d(poly)
normalized_data = z/poly_func(x)*np.median(np.abs(z[index_use]))
return normalized_data
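# Sketch of the effect (assumed typical sweep): points more than 100 kHz from the
# median frequency are treated as off-resonance, a 2nd-order polynomial baseline is fit
# to |z| there, and z is divided by that baseline (rescaled by the off-resonance median
# of |z|) so gain ripple from the readout chain is flattened out.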
def amplitude_normalization_sep(gain_x,gain_z,fine_x,fine_z,stream_x,stream_z):
'''
# normalize the amplitude variation; requires a gain scan
# uses the gain scan to normalize; does not use the fine scan
# flag frequencies to use in amplitude normalization
'''
index_use = np.where(np.abs(gain_x-np.median(gain_x))>100000) #100kHz away from resonator
poly = np.polyfit(gain_x[index_use],np.abs(gain_z[index_use]),2)
poly_func = np.poly1d(poly)
poly_data = poly_func(gain_x)
normalized_gain = gain_z/poly_data*np.median(np.abs(gain_z[index_use]))
normalized_fine = fine_z/poly_func(fine_x)*np.median(np.abs(gain_z[index_use]))
normalized_stream = stream_z/poly_func(stream_x)*np.median(np.abs(gain_z[index_use]))
amp_norm_dict = {'normalized_gain':normalized_gain,
'normalized_fine':normalized_fine,
'normalized_stream':normalized_stream,
'poly_data':poly_data}
return amp_norm_dict
def guess_x0_iq_nonlinear(x,z,verbose = False):
'''
# this is less robust than guess_x0_iq_nonlinear_sep
# below; it is recommended to use that instead
#make sure data is sorted from low to high frequency
'''
sort_index = np.argsort(x)
x = x[sort_index]
z = z[sort_index]
#extract just fine data
df = np.abs(x-np.roll(x,1))
fine_df = np.min(df[np.where(df != 0)])
fine_z_index = np.where(df<fine_df*1.1)
fine_z = z[fine_z_index]
fine_x = x[fine_z_index]
#extract the gain scan
gain_z_index = np.where(df>fine_df*1.1)
gain_z = z[gain_z_index]
gain_x = x[gain_z_index]
gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
#guess f0
fr_guess_index = np.argmin(np.abs(z))
#fr_guess = x[fr_guess_index]
fr_guess_index_fine = np.argmin(np.abs(fine_z))
# below breaks if there is not a right and left side in the fine scan
if fr_guess_index_fine == 0:
fr_guess_index_fine = len(fine_x)//2
elif fr_guess_index_fine == (len(fine_x)-1):
fr_guess_index_fine = len(fine_x)//2
fr_guess = fine_x[fr_guess_index_fine]
#guess Q
mag_max = np.max(np.abs(fine_z)**2)
mag_min = np.min(np.abs(fine_z)**2)
mag_3dB = (mag_max+mag_min)/2.
half_distance = np.abs(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index_fine:-1]
left = half_distance[0:fr_guess_index_fine]
right_index = np.argmin(np.abs(right))+fr_guess_index_fine
left_index = np.argmin(np.abs(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = np.max(20*np.log10(np.abs(z)))-np.min(20*np.log10(np.abs(z)))
amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3-1.3994861426891861e-06*d**4  # polynomial fit to amp versus depth
#guess impedance rotation phi
phi_guess = 0
#guess non-linearity parameter
#might be able to guess this from the ratio of the min and max distances between iq points in the fine sweep
a_guess = 0
#i0 and iq guess
if np.max(np.abs(fine_z))==np.max(np.abs(z)): # if the resonator has an impedance mismatch rotation that makes the fine scan greater than the cable delay
i0_guess = np.real(fine_z[np.argmax(np.abs(fine_z))])
q0_guess = np.imag(fine_z[np.argmax(np.abs(fine_z))])
else:
i0_guess = (np.real(fine_z[0])+np.real(fine_z[-1]))/2.
q0_guess = (np.imag(fine_z[0])+np.imag(fine_z[-1]))/2.
# cable delay guess tau
#y = mx +b
#m = (y2 - y1)/(x2-x1)
#b = y-mx
if len(gain_z)>1: #is there a gain scan?
m = (gain_phase - np.roll(gain_phase,1))/(gain_x-np.roll(gain_x,1))
b = gain_phase -m*gain_x
m_best = np.median(m[~np.isnan(m)])
tau_guess = m_best/(2*np.pi)
else:
tau_guess = 3*10**-9
if verbose == True:
print("fr guess = %.2f MHz" %(fr_guess/10**6))
print("Q guess = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
print("amp guess = %.2f" %amp_guess)
print("i0 guess = %.2f" %i0_guess)
print("q0 guess = %.2f" %q0_guess)
print("tau guess = %.2f x 10^-7" %(tau_guess/10**-7))
x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,i0_guess,q0_guess,tau_guess,fr_guess]
return x0
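# Illustrative note (added, not from the source): the returned guess vector is
# ordered [fr, Qr, amp, phi, a, i0, q0, tau, fr], matching the assignment above.
# A hypothetical way to use it is as the starting point of a least-squares fit,
# e.g. with scipy; the model function name is a placeholder, not defined here.
#
#   from scipy.optimize import curve_fit
#   x0_guess = guess_x0_iq_nonlinear(freqs, z)
#   popt, pcov = curve_fit(nonlinear_iq_model, freqs,
#                          np.hstack((np.real(z), np.imag(z))), p0=x0_guess)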
def guess_x0_mag_nonlinear(x,z,verbose = False):
'''
# this is less robust than guess_x0_mag_nonlinear_sep
# below; it is recommended to use that instead
# make sure data is sorted from low to high frequency
'''
sort_index = np.argsort(x)
x = x[sort_index]
z = z[sort_index]
#extract just fine data
#this will probably break if there is no fine scan
df = np.abs(x-np.roll(x,1))
fine_df = np.min(df[np.where(df != 0)])
fine_z_index = np.where(df<fine_df*1.1)
fine_z = z[fine_z_index]
fine_x = x[fine_z_index]
#extract the gain scan
gain_z_index = np.where(df>fine_df*1.1)
gain_z = z[gain_z_index]
gain_x = x[gain_z_index]
gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
#guess f0
fr_guess_index = np.argmin(np.abs(z))
#fr_guess = x[fr_guess_index]
fr_guess_index_fine = np.argmin(np.abs(fine_z))
if fr_guess_index_fine == 0:
fr_guess_index_fine = len(fine_x)//2
elif fr_guess_index_fine == (len(fine_x)-1):
fr_guess_index_fine = len(fine_x)//2
fr_guess = fine_x[fr_guess_index_fine]
#guess Q
mag_max = np.max(np.abs(fine_z)**2)
mag_min = np.min(np.abs(fine_z)**2)
mag_3dB = (mag_max+mag_min)/2.
half_distance = np.abs(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index_fine:-1]
left = half_distance[0:fr_guess_index_fine]
right_index = np.argmin(np.abs(right))+fr_guess_index_fine
left_index = np.argmin(np.abs(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = np.max(20*np.log10(np.abs(z)))-np.min(20*np.log10(np.abs(z)))
amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3-1.3994861426891861e-06*d**4  # polynomial fit to amp versus depth
#guess impedance rotation phi
phi_guess = 0
#guess non-linearity parameter
# might be able to guess this from the ratio of the minimum and maximum spacing between IQ points in the fine sweep
a_guess = 0
#b0 and b1 guess
if len(gain_z)>1:
xlin = (gain_x - fr_guess)/fr_guess
b1_guess = (np.abs(gain_z)[-1]**2-np.abs(gain_z)[0]**2)/(xlin[-1]-xlin[0])
else:
xlin = (fine_x - fr_guess)/fr_guess
b1_guess = (np.abs(fine_z)[-1]**2-np.abs(fine_z)[0]**2)/(xlin[-1]-xlin[0])
b0_guess = np.median(np.abs(gain_z)**2)
if verbose == True:
print("fr guess = %.2f MHz" %(fr_guess/10**6))
print("Q guess = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
print("amp guess = %.2f" %amp_guess)
print("phi guess = %.2f" %phi_guess)
print("b0 guess = %.2f" %b0_guess)
print("b1 guess = %.2f" %b1_guess)
x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,b0_guess,b1_guess,fr_guess]
return x0
def guess_x0_iq_nonlinear_sep(fine_x,fine_z,gain_x,gain_z,verbose = False):
'''
# this is the same as guess_x0_iq_nonlinear except that it takes
# the fine scan and the gain scan as separate variables
# this runs into fewer issues when trying to sort out what part of
# the data is fine and what part is gain for the guessing
# make sure data is sorted from low to high frequency
'''
#gain phase
gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
#guess f0
fr_guess_index = np.argmin(np.abs(fine_z))
# below breaks if there is not a right and left side in the fine scan
if fr_guess_index == 0:
fr_guess_index = len(fine_x)//2
elif fr_guess_index == (len(fine_x)-1):
fr_guess_index = len(fine_x)//2
fr_guess = fine_x[fr_guess_index]
#guess Q
mag_max = np.max(np.abs(fine_z)**2)
mag_min = np.min(np.abs(fine_z)**2)
mag_3dB = (mag_max+mag_min)/2.
half_distance = np.abs(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index:-1]
left = half_distance[0:fr_guess_index]
right_index = np.argmin(np.abs(right))+fr_guess_index
left_index = np.argmin(np.abs(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = np.max(20*np.log10(np.abs(gain_z)))-np.min(20*np.log10(np.abs(fine_z)))
amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3-1.3994861426891861e-06*d**4  # polynomial fit to amp versus depth
#guess impedance rotation phi
#phi_guess = 0
#guess impedance rotation phi
#fit a circle to the iq loop
xc, yc, R, residu = calibrate.leastsq_circle(np.real(fine_z),np.imag(fine_z))
# compute the angle between the vector from the off-resonance point to (0,0) and the vector from the off-resonance point to (xc,yc), the center of the fitted circle
off_res_i,off_res_q = (np.real(fine_z[0])+np.real(fine_z[-1]))/2.,(np.imag(fine_z[0])+np.imag(fine_z[-1]))/2.
x1, y1, = -off_res_i,-off_res_q
x2, y2 = xc-off_res_i,yc-off_res_q
dot = x1*x2 + y1*y2 # dot product
det = x1*y2 - y1*x2 # determinant
angle = np.arctan2(det, dot)
phi_guess = angle
# if phi is large better
to identify the type
Returns:
List of asset codes already defined in the PI Server
Raises:
"""
payload = payload_builder.PayloadBuilder() \
.WHERE(['configuration_key', '=', configuration_key]) \
.AND_WHERE(['type_id', '=', type_id]) \
.payload()
omf_created_objects = self._sending_process_instance._storage.query_tbl_with_payload('omf_created_objects', payload)
self._logger.debug("{func} - omf_created_objects {item} ".format(
func="_retrieve_omf_types_already_created",
item=omf_created_objects))
# Extracts only the asset_code column
rows = []
for row in omf_created_objects['rows']:
rows.append(row['asset_code'])
return rows
def _flag_created_omf_type(self, configuration_key, type_id, asset_code):
""" Stores into the Storage layer the successfully creation of the type into PICROMF.
Args:
configuration_key - part of the key to identify the type
type_id - part of the key to identify the type
asset_code - asset code defined into PICROMF
Returns:
Raises:
"""
payload = payload_builder.PayloadBuilder()\
.INSERT(configuration_key=configuration_key,
asset_code=asset_code,
type_id=type_id)\
.payload()
self._sending_process_instance._storage.insert_into_tbl("omf_created_objects", payload)
def _generate_omf_asset_id(self, asset_code):
""" Generates an asset id usable by AF/PI Server from an asset code stored into the Storage layer
Args:
asset_code : Asset code stored into the Storage layer
Returns:
Asset id usable by AF/PI Server
Raises:
"""
asset_id = asset_code.replace(" ", "")
return asset_id
def _generate_omf_measurement(self, asset_code):
""" Generates the measurement id associated to an asset code
Args:
asset_code : Asset code retrieved from the Storage layer
Returns:
Measurement id associated to the specific asset code
Raises:
"""
asset_id = asset_code.replace(" ", "")
type_id = self._config_omf_types['type-id']['value']
return type_id + _OMF_PREFIX_MEASUREMENT + asset_id
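# Illustrative note (added, not from the source): with a type-id of "0001", an
# asset code of "pump 1" and _OMF_PREFIX_MEASUREMENT assumed to be "measurement_",
# the id returned above would be "0001measurement_pump1" - spaces are stripped so
# the id is usable by the AF/PI Server. The prefix value is an assumption; it is
# defined elsewhere in the module and not shown in this excerpt.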
def _generate_omf_typename_automatic(self, asset_code):
""" Generates the typename associated to an asset code for the automated generation of the OMF types
Args:
asset_code : Asset code retrieved from the Storage layer
Returns:
typename associated to the specific asset code
Raises:
"""
asset_id = asset_code.replace(" ", "")
return asset_id + _OMF_SUFFIX_TYPENAME
def _create_omf_objects_automatic(self, asset_info):
""" Handles the Automatic OMF Type Mapping
Args:
asset_info : Asset's information as retrieved from the Storage layer,
having also a sample value for the asset
Returns:
response_status_code: http response code related to the PICROMF request
Raises:
"""
typename, omf_type = self._create_omf_type_automatic(asset_info)
self._create_omf_object_links(asset_info["asset_code"], typename, omf_type)
def _create_omf_type_automatic(self, asset_info):
""" Automatic OMF Type Mapping - Handles the OMF type creation
Args:
asset_info : Asset's information as retrieved from the Storage layer,
having also a sample value for the asset
Returns:
typename : typename associated to the asset
omf_type : describes the OMF type as a python dict
Raises:
"""
type_id = self._config_omf_types["type-id"]["value"]
sensor_id = self._generate_omf_asset_id(asset_info["asset_code"])
asset_data = asset_info["asset_data"]
typename = self._generate_omf_typename_automatic(sensor_id)
new_tmp_dict = copy.deepcopy(OMF_TEMPLATE_TYPE)
omf_type = {typename: new_tmp_dict["typename"]}
# Handles Static section
# Generates elements evaluating the StaticData retrieved from the Configuration Manager
omf_type[typename][0]["properties"]["Name"] = {
"type": "string",
"isindex": True
}
omf_type[typename][0]["id"] = type_id + "_" + typename + "_sensor"
for item in self._config['StaticData']:
omf_type[typename][0]["properties"][item] = {"type": "string"}
# Handles Dynamic section
omf_type[typename][1]["properties"]["Time"] = {
"type": "string",
"format": "date-time",
"isindex": True
}
omf_type[typename][1]["id"] = type_id + "_" + typename + "_measurement"
for item in asset_data:
item_type = plugin_common.evaluate_type(asset_data[item])
omf_type[typename][1]["properties"][item] = {"type": item_type}
if _log_debug_level == 3:
self._logger.debug("_create_omf_type_automatic - sensor_id |{0}| - omf_type |{1}| ".format(sensor_id, str(omf_type)))
self.send_in_memory_data_to_picromf("Type", omf_type[typename])
return typename, omf_type
def _create_omf_objects_configuration_based(self, asset_code, asset_code_omf_type):
""" Handles the Configuration Based OMF Type Mapping
Args:
asset_code
asset_code_omf_type : describes the OMF type as a python dict
Returns:
Raises:
"""
typename, omf_type = self._create_omf_type_configuration_based(asset_code_omf_type)
self._create_omf_object_links(asset_code, typename, omf_type)
def _create_omf_type_configuration_based(self, asset_code_omf_type):
""" Configuration Based OMF Type Mapping - Handles the OMF type creation
Args:
asset_code_omf_type : describes the OMF type as a python dict
Returns:
typename : typename associated to the asset
omf_type : describes the OMF type as a python dict
Raises:
"""
type_id = self._config_omf_types["type-id"]["value"]
typename = asset_code_omf_type["typename"]
new_tmp_dict = copy.deepcopy(OMF_TEMPLATE_TYPE)
omf_type = {typename: new_tmp_dict["typename"]}
# Handles Static section
omf_type[typename][0]["properties"] = asset_code_omf_type["static"]
omf_type[typename][0]["id"] = type_id + "_" + typename + "_sensor"
# Handles Dynamic section
omf_type[typename][1]["properties"] = asset_code_omf_type["dynamic"]
omf_type[typename][1]["id"] = type_id + "_" + typename + "_measurement"
if _log_debug_level == 3:
self._logger.debug("_create_omf_type_configuration_based - omf_type |{0}| ".format(str(omf_type)))
self.send_in_memory_data_to_picromf("Type", omf_type[typename])
return typename, omf_type
def _create_omf_object_links(self, asset_code, typename, omf_type):
""" Handles the creation of the links between the OMF objects :
sensor, its measurement, sensor type and measurement type
Args:
asset_code
typename : name/id of the type
omf_type : describes the OMF type as a python dict
Returns:
Raises:
"""
sensor_id = self._generate_omf_asset_id(asset_code)
measurement_id = self._generate_omf_measurement(sensor_id)
type_sensor_id = omf_type[typename][0]["id"]
type_measurement_id = omf_type[typename][1]["id"]
# Handles containers
containers = copy.deepcopy(_OMF_TEMPLATE_CONTAINER)
containers[0]["id"] = measurement_id
containers[0]["typeid"] = type_measurement_id
# Handles static_data
static_data = copy.deepcopy(_OMF_TEMPLATE_STATIC_DATA)
static_data[0]["typeid"] = type_sensor_id
static_data[0]["values"][0] = copy.deepcopy(self._config['StaticData'])
static_data[0]["values"][0]['Name'] = sensor_id
# Handles link_data
link_data = copy.deepcopy(_OMF_TEMPLATE_LINK_DATA)
link_data[0]["values"][0]['source']['typeid'] = type_sensor_id
link_data[0]["values"][0]['target']['typeid'] = type_sensor_id
link_data[0]["values"][0]['target']['index'] = sensor_id
link_data[0]["values"][1]['source']['typeid'] = type_sensor_id
link_data[0]["values"][1]['source']['index'] = sensor_id
link_data[0]["values"][1]['target']['containerid'] = measurement_id
if _log_debug_level == 3:
self._logger.debug("_create_omf_object_links - asset_code |{0}| - containers |{1}| ".format(asset_code,
str(containers)))
self._logger.debug("_create_omf_object_links - asset_code |{0}| - static_data |{1}| ".format(asset_code,
str(static_data)))
self._logger.debug("_create_omf_object_links - asset_code |{0}| - link_data |{1}| ".format(asset_code,
str(link_data)))
self.send_in_memory_data_to_picromf("Container", containers)
self.send_in_memory_data_to_picromf("Data", static_data)
self.send_in_memory_data_to_picromf("Data", link_data)
return
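# Recap of the calls above (added note, not from the source): for each asset one
# "Container" message binds measurement_id to the dynamic type, one "Data" message
# carries the sensor's static attributes (with 'Name' set to the sensor id), and a
# second "Data" message carries the link records tying the sensor element to its
# measurement container.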
@_performance_log
def create_omf_objects(self, raw_data, config_category_name, type_id):
""" Handles the creation of the OMF types related to the asset codes using one of the 2 possible ways :
Automatic OMF Type Mapping
Configuration Based OMF Type Mapping
Args:
raw_data : data block to manage as retrieved from the Storage layer
config_category_name: used to identify OMF objects already created
type_id: used to identify OMF objects already created
Returns:
Raises:
"""
asset_codes_to_evaluate = plugin_common.identify_unique_asset_codes(raw_data)
asset_codes_already_created = self._retrieve_omf_types_already_created(config_category_name, type_id)
for item in asset_codes_to_evaluate:
asset_code = item["asset_code"]
# Evaluates if it is a new OMF type
if not any(tmp_item == asset_code for tmp_item in asset_codes_already_created):
asset_code_omf_type = ""
try:
asset_code_omf_type = copy.deepcopy(self._config_omf_types[asset_code]["value"])
except KeyError:
configuration_based = False
else:
configuration_based = True
if configuration_based:
self._logger.debug("creates type - configuration based - asset |{0}| ".format(asset_code))
self._create_omf_objects_configuration_based(asset_code, asset_code_omf_type)
else:
# handling - Automatic OMF Type Mapping
self._logger.debug("creates type - automatic handling - asset |{0}| ".format(asset_code))
self._create_omf_objects_automatic(item)
self._flag_created_omf_type(config_category_name, type_id, asset_code)
else:
self._logger.debug("asset already created - asset |{0}| ".format(asset_code))
@_performance_log
def send_in_memory_data_to_picromf(self, message_type, omf_data):
""" Sends data to PICROMF - it retries the operation using a sleep time increased *2 for every retry
it logs a WARNING only at the end of the retry mechanism in case of a communication error
Args:
message_type: possible values {Type, Container, Data}
omf_data: OMF message to send
Returns:
Raises:
Exception: an error occurred during the OMF request
URLFetchError: in case of http response code different from 2xx
"""
sleep_time = self._config['OMFRetrySleepTime']
_message = ""
_error = False
num_retry = 1
msg_header = {'producertoken': self._config['producerToken'],
'messagetype': message_type,
'action': 'create',
'messageformat': 'JSON',
'omfversion': '1.0'}
omf_data_json = json.dumps(omf_data)
self._logger.debug("OMF message length |{0}| ".format(len(omf_data_json)))
if _log_debug_level == 3:
self._logger.debug("OMF message : |{0}| |{1}| " .format(message_type, omf_data_json))
while num_retry <= self._config['OMFMaxRetry']:
_error = False
try:
response = requests.post(self._config['URL'],
headers=msg_header,
data=omf_data_json,
verify=False,
timeout=self._config['OMFHttpTimeout'])
except Exception as e:
_error = Exception(plugin_common.MESSAGES_LIST["e000024"].format(e))
_message = plugin_common.MESSAGES_LIST["e000024"].format(e)
else:
# Evaluate the HTTP status codes
if not str(response.status_code).startswith('2'):
tmp_text = str(response.status_code) + " " + response.text
_message = plugin_common.MESSAGES_LIST["e000024"].format(tmp_text)
_error = plugin_exceptions.URLFetchError(_message)
self._logger.debug("message type |{0}| response: |{1}| |{2}| ".format(message_type,
response.status_code,
response.text))
if _error:
time.sleep(sleep_time)
num_retry += 1
sleep_time *= 2
else:
break
if _error:
self._logger.warning(_message)
raise _error
@_performance_log
def transform_in_memory_data(self, data_to_send, raw_data):
""" Transforms the in memory data into a new structure that could be converted into JSON for the PICROMF
Args:
Returns:
Raises:
"""
new_position = 0
data_available = False
# statistics
num_sent = 0
# internal statistic - rows that generate errors in the preparation process, before sending them to OMF
num_unsent = 0
try:
for row in raw_data:
row_id = row['id']
asset_code = row['asset_code']
# Identification of the object/sensor
measurement_id = self._generate_omf_measurement(asset_code)
try:
self._transform_in_memory_row(data_to_send, row, measurement_id)
# Used for the statistics update
num_sent += 1
# Latest position reached
new_position = row_id
data_available = True
except Exception as e:
num_unsent += 1
self._logger.warning(plugin_common.MESSAGES_LIST["e000023"].format(e))
except Exception:
self._logger.error(plugin_common.MESSAGES_LIST["e000021"])
raise
return data_available, new_position, num_sent
def _transform_in_memory_row(self, data_to_send, row, target_stream_id):
""" Extends the in memory structure using data retrieved from the Storage Layer
Args:
data_to_send: data block to send - updated/used by reference
row: information retrieved from the Storage Layer that it is used to
# -*- coding: utf-8 -*-
import copy
import os
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import QuerySet
from unittest import mock
from django.test import TestCase, Client
from django.urls import reverse
from ..forms import AddBookForm
from ..models import (TheUser, Category, Author, Language, Book,
AddedBook, BookRating, BookComment, Post, SupportMessage, BookRelatedData)
from .utils import Utils
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA_DIR = os.path.join(TEST_DIR, 'fixtures')
# ----------------------------------------------------------------------------------------------------------------------
class ModelTest(TestCase):
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setUpTestData(cls):
cls.setup_users()
cls.setup_categories()
cls.setup_authors()
cls.setup_languages()
cls.setup_books()
cls.setup_added_books()
cls.setup_book_rating()
cls.setup_book_comment()
cls.setup_post_messages()
cls.setup_support_messages()
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_users(cls):
client = Client()
cls.anonymous_user = auth.get_user(client)
cls.user1 = User.objects.create_user('user1', '<EMAIL>', '<PASSWORD>')
cls.user2 = User.objects.create_user('user2', '<EMAIL>', '<PASSWORD>')
cls.user3 = User.objects.create_user('user3', '<EMAIL>', '<PASSWORD>')
cls.user4 = User.objects.create_user('user4', '<EMAIL>', '<PASSWORD>')
cls.user5 = User.objects.create_user('user5', '<EMAIL>', '<PASSWORD>')
cls.user6 = User.objects.create_user('user6', '<EMAIL>', '<PASSWORD>')
cls.the_user1 = TheUser.objects.get(id_user=cls.user1)
cls.the_user2 = TheUser.objects.get(id_user=cls.user2)
cls.the_user5 = TheUser.objects.get(id_user=cls.user5)
cls.the_user6 = TheUser.objects.get(id_user=cls.user6)
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_categories(cls):
cls.category1 = Category.objects.create(category_name='category1')
cls.category2 = Category.objects.create(category_name='category2')
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_authors(cls):
cls.author1 = Author.objects.create(author_name='Best Author 1')
cls.author2 = Author.objects.create(author_name='trueAuthorNew')
cls.author3 = Author.objects.create(author_name='zlast author')
cls.author4 = Author.objects.create(author_name='<AuthorSpecialSymbols>&"')
cls.author5 = Author.objects.create(author_name="O'Connor")
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_languages(cls):
cls.language_en = Language.objects.create(language='English')
cls.language_ru = Language.objects.create(language='Russian')
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_books(cls):
test_book_path = os.path.join(TEST_DATA_DIR, 'test_book.pdf')
test_book_image_path = os.path.join(TEST_DATA_DIR, 'test_book_image.png')
books_setup = [
{
'name': 'First Book',
'author': cls.author1,
'category': cls.category1,
'language': cls.language_en,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user1,
'private': True
},
{
'name': 'Second Book',
'author': cls.author2,
'category': cls.category1,
'language': cls.language_en,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'who_added': cls.the_user2,
'blocked_book': True
},
{
'name': 'Third Book',
'author': cls.author2,
'category': cls.category1,
'language': cls.language_ru,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user1,
'blocked_book': True
},
{
'name': 'Fourth Book',
'author': cls.author1,
'category': cls.category1,
'language': cls.language_ru,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user2,
'blocked_book': True
},
{
'name': 'Fifth Book',
'author': cls.author1,
'category': cls.category2,
'language': cls.language_ru,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'who_added': cls.the_user1,
'private': True
},
{
'name': 'Sixth Book',
'author': cls.author2,
'category': cls.category2,
'language': cls.language_en,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user2
},
{
'name': 'Seventh Book<>&"',
'author': cls.author4,
'category': cls.category2,
'language': cls.language_en,
'file': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
'photo': SimpleUploadedFile('test_book_image.png', open(test_book_image_path, 'rb').read()),
'who_added': cls.the_user2
}
]
for book in books_setup:
Book.objects.create(
book_name=book['name'],
id_author=book['author'],
id_category=book['category'],
description='TEST description',
language=book['language'],
book_file=book['file'],
photo=book.get('photo', False),
who_added=book['who_added'],
private_book=book.get('private', False),
blocked_book=book.get('blocked_book', False)
)
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_added_books(cls):
AddedBook.objects.create(id_user=cls.the_user1, id_book=Book.objects.get(book_name='Third Book'))
AddedBook.objects.create(id_user=cls.the_user1, id_book=Book.objects.get(book_name='Sixth Book'))
AddedBook.objects.create(id_user=cls.the_user1, id_book=Book.objects.get(book_name='Fourth Book'))
AddedBook.objects.create(id_user=cls.the_user2, id_book=Book.objects.get(book_name='Third Book'))
AddedBook.objects.create(id_user=cls.the_user2, id_book=Book.objects.get(book_name='Sixth Book'))
AddedBook.objects.create(id_user=cls.the_user2, id_book=Book.objects.get(book_name='Second Book'))
AddedBook.objects.create(id_user=cls.the_user5, id_book=Book.objects.get(book_name='Sixth Book'))
AddedBook.objects.create(id_user=cls.the_user6, id_book=Book.objects.get(book_name='Sixth Book'))
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_book_rating(cls):
BookRating.objects.create(id_book=Book.objects.get(book_name='Third Book'), id_user=cls.the_user1, rating=10)
BookRating.objects.create(id_book=Book.objects.get(book_name='Third Book'), id_user=cls.the_user2, rating=5)
BookRating.objects.create(id_book=Book.objects.get(book_name='Third Book'), id_user=cls.the_user5, rating=3)
BookRating.objects.create(id_book=Book.objects.get(book_name='Fourth Book'), id_user=cls.the_user1, rating=7)
BookRating.objects.create(id_book=Book.objects.get(book_name='Sixth Book'), id_user=cls.the_user1, rating=4)
BookRating.objects.create(id_book=Book.objects.get(book_name='Second Book'), id_user=cls.the_user2, rating=7)
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_book_comment(cls):
second_book = Book.objects.get(book_name='Second Book')
third_book = Book.objects.get(book_name='Third Book')
fourth_book = Book.objects.get(book_name='Fourth Book')
BookComment.objects.create(id_book=second_book, id_user=cls.the_user1, text='Test book 2 user 1')
BookComment.objects.create(id_book=second_book, id_user=cls.the_user2, text='Test book 2 user 2')
BookComment.objects.create(id_book=third_book, id_user=cls.the_user1, text='Test book 3 user 1')
BookComment.objects.create(id_book=fourth_book, id_user=cls.the_user1, text='Test book 4 user 1')
BookComment.objects.create(id_book=fourth_book, id_user=cls.the_user5, text='Test book 4 user 5')
# ------------------------------------------------------------------------------------------------------------------
@classmethod
@mock.patch('app.signals.email_dispatch.apply_async', new=mock.Mock())
def setup_post_messages(cls):
Post.objects.create(user=cls.the_user1, heading='post 1', text='Posted test text 1')
Post.objects.create(user=cls.the_user1, heading='post 2', text='Posted test text 2')
Post.objects.create(user=cls.the_user2, heading='post 3', text='Posted test text 3')
# ------------------------------------------------------------------------------------------------------------------
@classmethod
def setup_support_messages(cls):
SupportMessage.objects.create(email='<EMAIL>', text='Test text1')
SupportMessage.objects.create(email='<EMAIL>', text='Test text2')
SupportMessage.objects.create(email='<EMAIL>', text='Test text3')
SupportMessage.objects.create(email='<EMAIL>', text='Test text4')
# ------------------------------------------------------------------------------------------------------------------
def test_the_user_str(self):
self.assertEqual(str(self.the_user1), 'user1')
self.assertEqual(str(self.the_user2), 'user2')
# ------------------------------------------------------------------------------------------------------------------
def test_creating_the_user_objects(self):
"""
Must create 'app.models.TheUser' instance after django User instance was created.
"""
self.assertEqual(User.objects.all().count(), 6)
self.assertEqual(User.objects.all().count(), TheUser.objects.all().count())
self.assertNotEqual(self.the_user1.auth_token, '')
self.assertNotEqual(self.the_user1.auth_token, self.the_user2.auth_token)
# ------------------------------------------------------------------------------------------------------------------
def test_the_user_get_api_reminders(self):
reminders = self.the_user1.get_api_reminders()
reminders_keys_correct = ['vk', 'fb_group', 'fb_page', 'twitter', 'disabled_all', 'app_rate']
self.assertTrue(isinstance(reminders, dict))
self.assertEqual(sorted(list(reminders.keys())), sorted(reminders_keys_correct))
# ------------------------------------------------------------------------------------------------------------------
def test_the_user_get_web_reminders(self):
reminders = self.the_user1.get_web_reminders()
reminders_keys_correct = ['vk', 'fb_group', 'fb_page', 'twitter', 'disabled_all', 'app_download']
self.assertTrue(isinstance(reminders, dict))
self.assertEqual(sorted(list(reminders.keys())), sorted(reminders_keys_correct))
# ------------------------------------------------------------------------------------------------------------------
def test_the_user_update_reminder(self):
reminders = self.the_user1.get_web_reminders()
self.assertTrue(isinstance(reminders, dict))
self.assertEqual(reminders['vk'], True)
self.assertEqual(reminders['app_download'], True)
self.the_user1.update_reminder('vk', False)
self.the_user1.update_reminder('app_download', False)
updated_reminders = self.the_user1.get_web_reminders()
self.assertTrue(isinstance(updated_reminders, dict))
self.assertEqual(updated_reminders['vk'], False)
self.assertEqual(updated_reminders['app_download'], False)
# ------------------------------------------------------------------------------------------------------------------
def test_removing_user_objects(self):
"""
Must remove django User instance after 'app.models.TheUser' object was deleted.
"""
the_user3 = TheUser.objects.get(id_user__username='user3')
the_user4 = TheUser.objects.get(id_user__email='<EMAIL>')
the_user3.delete()
the_user4.delete()
self.assertEqual(User.objects.all().count(), 4)
self.assertEqual(User.objects.all().count(), TheUser.objects.all().count())
# ------------------------------------------------------------------------------------------------------------------
def test_created_categories(self):
self.assertEqual(Category.objects.all().count(), 2)
self.assertNotEqual(self.category1, self.category2)
# ------------------------------------------------------------------------------------------------------------------
def test_categories_str(self):
self.assertEqual(str(self.category1), 'category1')
self.assertEqual(str(self.category2), 'category2')
# ------------------------------------------------------------------------------------------------------------------
def test_created_authors(self):
self.assertEqual(Author.objects.all().count(), 5)
self.assertNotEqual(self.author1, self.author2)
# ------------------------------------------------------------------------------------------------------------------
def test_get_authors_list(self):
"""
Must return authors list depending on different letters/letter case/words/symbols.
"""
self.assertEqual(Author.get_authors_list('bEst'), ['Best Author 1'])
self.assertEqual(Author.get_authors_list('1'), ['Best Author 1'])
self.assertEqual(Author.get_authors_list(' '), ['Best Author 1', 'zlast author'])
self.assertEqual(Author.get_authors_list('new'), ['trueAuthorNew'])
self.assertEqual(Author.get_authors_list('TRUE'), ['trueAuthorNew'])
self.assertEqual(Author.get_authors_list('Best Author 1'), ['Best Author 1'])
self.assertEqual(Author.get_authors_list('trueAuthorNew'), ['trueAuthorNew'])
# ------------------------------------------------------------------------------------------------------------------
def test_get_authors_list_with_escaping(self):
self.assertEqual(Author.get_authors_list("'", True), ["O'Connor"])
self.assertEqual(Author.get_authors_list("Connor", True), ["O'Connor"])
self.assertEqual(
Author.get_authors_list('b', True),
['Best Author 1', '<AuthorSpecialSymbols>&"']
)
self.assertEqual(
Author.get_authors_list('e', True),
['Best Author 1', 'trueAuthorNew', '<AuthorSpecialSymbols>&"']
)
self.assertEqual(
Author.get_authors_list('author', True),
['Best Author 1', 'trueAuthorNew', 'zlast author', '<AuthorSpecialSymbols>&"']
)
# ------------------------------------------------------------------------------------------------------------------
def test_get_authors_list_without_escaping(self):
self.assertEqual(Author.get_authors_list("'"), ["O'Connor"])
self.assertEqual(Author.get_authors_list("Connor", False), ["O'Connor"])
self.assertEqual(Author.get_authors_list('b'), ['Best Author 1', '<AuthorSpecialSymbols>&"'])
self.assertEqual(
Author.get_authors_list('e'),
['Best Author 1', 'trueAuthorNew', '<AuthorSpecialSymbols>&"']
)
self.assertEqual(
Author.get_authors_list('author', False),
['Best Author 1', 'trueAuthorNew', 'zlast author', '<AuthorSpecialSymbols>&"']
)
# ------------------------------------------------------------------------------------------------------------------
def test_created_language(self):
self.assertEqual(Language.objects.all().count(), 2)
self.assertNotEqual(self.author1, self.author2)
# ------------------------------------------------------------------------------------------------------------------
def test_created_books(self):
books = Book.objects.all()
self.assertEqual(books.count(), 7)
self.assertEqual(books.filter(private_book=True).count(), 2)
self.assertEqual(books.filter(id_category=self.category1).count(), 4)
self.assertEqual(books.filter(id_author=self.author1).count(), 3)
self.assertEqual(books.filter(language=self.language_en).count(), 4)
self.assertEqual(books.filter(photo=False).count(), 2)
self.assertEqual(books.filter(who_added=self.the_user1).count(), 3)
self.assertEqual(books.filter(id_category=self.category2, id_author=self.author2).count(), 1)
self.assertEqual(books.filter(id_category=self.category1,
id_author=self.author2,
language=self.language_ru,
who_added=self.the_user1).count(), 1)
self.assertEqual(books.filter(id_category=self.category1,
id_author=self.author2,
language=self.language_ru,
who_added=self.the_user2).count(), 0)
self.assertEqual(books.filter(blocked_book=True).count(), 3)
# ------------------------------------------------------------------------------------------------------------------
def test_get_related_objects_for_create(self):
test_book_path = os.path.join(TEST_DATA_DIR, 'test_book.pdf')
form_data = {
'bookname': 'The new book',
'author': 'trueAuthorNew',
'category': 'category1',
'language': 'English',
'about': 'about book',
'bookfile': SimpleUploadedFile('test_book.pdf', open(test_book_path, 'rb').read()),
}
form_data_new_author = copy.deepcopy(form_data)
form_data_new_author['author'] = 'super new author'
self.assertEqual(Author.objects.all().count(), 5)
form = AddBookForm(data=form_data)
form.is_valid()
form_with_new_author = AddBookForm(data=form_data_new_author)
form_with_new_author.is_valid()
related_data = Book.get_related_objects_for_create(self.user1.id, form)
self.assertTrue(isinstance(related_data, BookRelatedData))
self.assertEqual(len(related_data), 4)
self.assertEqual(related_data.author, Author.objects.get(author_name='trueAuthorNew'))
self.assertEqual(Author.objects.all().count(), 5)
related_data_new_author = Book.get_related_objects_for_create(self.user1.id, form_with_new_author)
self.assertTrue(isinstance(related_data, BookRelatedData))
self.assertEqual(len(related_data_new_author), 4)
self.assertEqual(related_data_new_author.author, Author.objects.get(author_name='super new author'))
self.assertEqual(Author.objects.all().count(), 6)
# ------------------------------------------------------------------------------------------------------------------
def test_get_related_objects_create_api(self):
"""
Must generate Book related data when creating a Book object for API calls.
A new author must be returned if its name is not present in the Author model.
"""
test_data = {'author': 'trueAuthorNew', 'category': 'category2', 'language': 'Russian'}
test_data_new_author = {'author': 'NEW AUTHOR', 'category': 'category1', 'language': 'English'}
self.assertEqual(
Book.get_related_objects_create_api(self.the_user1, test_data),
BookRelatedData(self.author2, self.category2, self.language_ru, None)
)
self.assertEqual(Author.objects.all().count(), 5)
self.assertEqual(
Book.get_related_objects_create_api(self.the_user1, test_data_new_author),
BookRelatedData(Author.objects.get(author_name='NEW AUTHOR'), self.category1, self.language_en, None)
)
self.assertEqual(Author.objects.all().count(), 6)
# ------------------------------------------------------------------------------------------------------------------
def test_get_related_objects_selected_book_unknown_user(self):
"""
Must generate selected book related data for unknown (anonymous) users.
"""
third_book = Book.objects.get(book_name='Third Book')
sixth_book = Book.objects.get(book_name='Sixth Book')
self.assertTrue(isinstance(Book.get_related_objects_selected_book(self.anonymous_user, third_book.id), dict))
related_third_book = Book.get_related_objects_selected_book(self.anonymous_user, third_book.id)
related_sixth_book = Book.get_related_objects_selected_book(self.anonymous_user, sixth_book.id)
self.assertEqual(related_third_book['book'], third_book)
self.assertEqual(related_third_book['avg_book_rating'], {'rating__avg': 6.0})
self.assertEqual(related_third_book['book_rating_count'], 3)
self.assertEqual(related_third_book['added_book'], None)
self.assertEqual(related_third_book['comments'].count(), 1)
self.assertEqual(related_third_book['comments'][0],
BookComment.objects.filter(id_book=third_book).order_by('-id')[0])
self.assertEqual(related_sixth_book['book'], sixth_book)
self.assertEqual(related_sixth_book['avg_book_rating'], {'rating__avg': 4.0})
self.assertEqual(related_sixth_book['book_rating_count'], 1)
self.assertEqual(related_sixth_book['added_book'], None)
self.assertEqual(related_sixth_book['comments'].count(), 0)
AddedBook.objects.create(id_user=self.the_user5, id_book=third_book)
BookRating.objects.create(id_user=self.the_user6, id_book=third_book, rating=10)
BookComment.objects.create(id_user=self.the_user6, id_book=third_book, text='TEST TEXT 2')
related_third_book = Book.get_related_objects_selected_book(self.anonymous_user, third_book.id)
self.assertEqual(related_third_book['book'], third_book)
self.assertEqual(related_third_book['avg_book_rating'], {'rating__avg': 7.0})
self.assertEqual(related_third_book['book_rating_count'], 4)
self.assertEqual(related_third_book['added_book'], None)
self.assertEqual(related_third_book['comments'].count(), 2)
# ------------------------------------------------------------------------------------------------------------------
def test_get_related_objects_selected_book_added_user(self):
"""
This case tests only the 'added_book' param, because for a
user who is reading the book only this attribute changes relative to the function above.
"""
third_book = Book.objects.get(book_name='Third Book')
sixth_book = Book.objects.get(book_name='Sixth Book')
self.assertTrue(isinstance(Book.get_related_objects_selected_book(self.the_user1.id_user, third_book.id), dict))
related_third_book = Book.get_related_objects_selected_book(self.the_user1.id_user, third_book.id)
related_sixth_book = Book.get_related_objects_selected_book(self.the_user1.id_user, sixth_book.id)
self.assertEqual(related_third_book['added_book'],
AddedBook.objects.get(id_book=third_book, id_user=self.the_user1))
self.assertEqual(related_sixth_book['added_book'],
AddedBook.objects.get(id_book=sixth_book, id_user=self.the_user1))
# ------------------------------------------------------------------------------------------------------------------
def test_get_related_objects_selected_book_with_user_key(self):
"""
Tests returning data for related objects for a selected book with the 'user_key' attribute, meaning that
the user is anonymous (i.e. not logged in) but uses a user key. Done for API request access.
"""
third_book = Book.objects.get(book_name='Third Book')
related_third_book = Book.get_related_objects_selected_book(
self.anonymous_user, third_book.id, self.the_user1.auth_token
)
self.assertEqual(related_third_book['book'], third_book)
self.assertEqual(related_third_book['avg_book_rating'], {'rating__avg': 6.0})
self.assertEqual(related_third_book['book_rating_count'], 3)
self.assertEqual(related_third_book['added_book'],
AddedBook.objects.get(id_book=third_book, id_user=self.the_user1))
self.assertEqual(related_third_book['comments'].count(), 1)
self.assertEqual(related_third_book['comments'][0],
BookComment.objects.filter(id_book=third_book).order_by('-id')[0])
# ------------------------------------------------------------------------------------------------------------------
def test_sort_by_book_name_category1(self):
"""
Must generate correct dictionaries for anonymous users, users with private books and without.
Testing first category.
"""
first_book = Book.objects.get(book_name='First Book')
third_book = Book.objects.get(book_name='Third Book')
fourth_book = Book.objects.get(book_name='Fourth Book')
first_book_dict = Utils.generate_sort_dict(first_book)
third_book_dict = Utils.generate_sort_dict(third_book)
fourth_book_dict = Utils.generate_sort_dict(fourth_book)
self.assertTrue(isinstance(Book.sort_by_book_name(self.anonymous_user, self.category1), list))
self.assertEqual(len(Book.sort_by_book_name(self.anonymous_user, self.category1)), 3)
self.assertEqual(Book.sort_by_book_name(self.anonymous_user, self.category1)[0], fourth_book_dict)
self.assertEqual(Book.sort_by_book_name(self.anonymous_user, self.category1)[2], third_book_dict)
self.assertEqual(len(Book.sort_by_book_name(self.the_user2.id_user, self.category1)), 3)
self.assertEqual(Book.sort_by_book_name(self.the_user2.id_user, self.category1)[0], fourth_book_dict)
self.assertEqual(Book.sort_by_book_name(self.the_user2.id_user, self.category1)[2], third_book_dict)
self.assertEqual(len(Book.sort_by_book_name(self.the_user1.id_user, self.category1)), 4)
self.assertEqual(Book.sort_by_book_name(self.the_user1.id_user, self.category1)[0], first_book_dict)
self.assertEqual(Book.sort_by_book_name(self.the_user1.id_user, self.category1)[3], third_book_dict)
# ------------------------------------------------------------------------------------------------------------------
def test_sort_by_book_name_category2(self):
"""
Must generate correct dictionaries for anonymous users, users with private books and without.
Testing second category.
"""
fifth_book = Book.objects.get(book_name='Fifth Book')
seventh_book = Book.objects.get(book_name='Seventh Book<>&"')
fifth_book_dict = Utils.generate_sort_dict(fifth_book)
seventh_book_dict = Utils.generate_sort_dict(seventh_book)
self.assertEqual(len(Book.sort_by_book_name(self.anonymous_user, self.category2)), 2)
self.assertEqual(Book.sort_by_book_name(self.anonymous_user, self.category2)[0], seventh_book_dict)
self.assertEqual(len(Book.sort_by_book_name(self.the_user2.id_user, self.category2)), 2)
self.assertEqual(Book.sort_by_book_name(self.the_user2.id_user, self.category2)[0], seventh_book_dict)
self.assertEqual(len(Book.sort_by_book_name(self.the_user1.id_user, self.category2)), 3)
# -*- coding: utf-8 -*-
"""
/***************************************************************************
HidrowebDownloader
A QGIS plugin
Download hydrological data from ANA's API (Hidroweb)
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2021-03-27
git sha : $Format:%H$
copyright : (C) 2021 by <NAME>
email : <EMAIL>
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtCore import QSettings, QTranslator, QCoreApplication, QVariant
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QAction
from qgis.core import *
# Initialize Qt resources from file resources.py
from .resources import *
# Import the code for the dialog
from .hidroweb_downloader_dialog import HidrowebDownloaderDialog
import os.path
from shapely.geometry import Point, Polygon, MultiPolygon
import requests, csv, os, datetime, calendar
import xml.etree.ElementTree as ET
class HidrowebDownloader:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'HidrowebDownloader_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&Hidroweb Downloader')
# Check if plugin was started the first time in current QGIS session
# Must be set in initGui() to survive plugin reloads
self.first_start = None
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('HidrowebDownloader', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
# Adds plugin icon to Plugins toolbar
self.iface.addToolBarIcon(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/hidroweb_downloader/icon.png'
self.add_action(
icon_path,
text=self.tr(u'Download hydrological data from Hidroweb'),
callback=self.run,
parent=self.iface.mainWindow())
# will be set False in run()
self.first_start = True
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&Hidroweb Downloader'),
action)
self.iface.removeToolBarIcon(action)
def run(self):
"""Run method that performs all the real work"""
# Create the dialog with elements (after translation) and keep reference
# Only create GUI ONCE in callback, so that it will only load when the plugin is started
if self.first_start == True:
self.first_start = False
self.dlg = HidrowebDownloaderDialog()
self.dlg.download_button.clicked.connect(self.polygon_station)
self.dlg.inventarioDownload_button.clicked.connect(self.inventario)
# show the dialog
self.dlg.show()
# Run the dialog event loop
result = self.dlg.exec_()
# See if OK was pressed
if result:
# Do something useful here - delete the line containing pass and
# substitute with your code.
# print('ok')
print(self.dlg.file_widget.filePath())
def polygon_station(self):
error = self.check_errors()
if error:
print('Error')
# sys.exit()
else:
layer_input = self.dlg.mapLayer_box.currentLayer()
print(layer_input)
feat = layer_input.getFeatures()
for l in feat:
feat_geometry = l.geometry()
if self.dlg.buffer_spinbox.value() == 0:
pass
else:
feat_geometry = self.create_buffer_polygon(feat_geometry=feat_geometry, distance=self.dlg.buffer_spinbox.value(), segments=5)
with open(self.dlg.inventario_path.filePath(), encoding='utf8') as csvfile:
total = len(list(csv.DictReader(csvfile)))
print(total)
with open(self.dlg.inventario_path.filePath(), encoding='utf8') as csvfile:
data = csv.DictReader(csvfile)
i = 0
for row in data:
i += 1
# print(row)
self.dlg.progressBar.setValue(i/float(total)*100)
if feat_geometry.contains(QgsPointXY(float(row['Longitude']), float(row['Latitude']))):
print('aqui')
print(row['TipoEstacao'])
if (self.dlg.rain_checkbox.isChecked()) and (not self.dlg.flow_checkbox.isChecked()) and (int(row['TipoEstacao'])==2):
print('rain checkbox')
self.point_station(codigo=row['Codigo'],
tipoEstacao=row['TipoEstacao'],
lon=row['Longitude'],
lat=row['Latitude'])
elif (self.dlg.flow_checkbox.isChecked()) and (not self.dlg.rain_checkbox.isChecked()) and (int(row['TipoEstacao'])==1):
print('flow checkbox')
print(row['Codigo'])
self.point_station(codigo=row['Codigo'],
tipoEstacao=row['TipoEstacao'],
lon=row['Longitude'],
lat=row['Latitude'])
elif (self.dlg.rain_checkbox.isChecked()) and (self.dlg.flow_checkbox.isChecked()):
print('both rain and flow checkbox')
self.point_station(codigo=row['Codigo'],
tipoEstacao=row['TipoEstacao'],
lon=row['Longitude'],
lat=row['Latitude'])
else:
print('Nada selecionado')
# print(self.dlg.inventario_path.filePath()[:-3])
self.iface.messageBar().pushMessage('Success', 'Programa finalizado!', level=Qgis.Success)
def point_station(self, codigo, tipoEstacao, lon, lat):
layers = list(QgsProject.instance().mapLayers().values())
layers_name = [l.name() for l in layers]
s = self.download_station(code=codigo,
typeData=tipoEstacao,
folder_toDownload=f'{self.dlg.data_folder.filePath()}',
lon=lon, lat=lat)
if (not f'{codigo}_{tipoEstacao}' in layers_name) and (s[0]):
lyr = QgsVectorLayer("point?crs=epsg:4326&field=id:integer", f"{codigo}_{tipoEstacao}", "memory")
QgsProject.instance().addMapLayer(lyr)
target_layer = QgsProject.instance().mapLayersByName(f'{codigo}_{tipoEstacao}')
target_layer[0].startEditing()
l_d = target_layer[0].dataProvider()
feat = QgsFeature(target_layer[0].fields())
feat.setGeometry(QgsPoint(float(lon), float(lat)))
if int(tipoEstacao)== 1:
l_d.addAttributes([QgsField('Date', QVariant.Date), QgsField('Consistencia', QVariant.Int), QgsField('Vazao',QVariant.Double)])
for i, (date, consis, data) in enumerate(zip(s[1], s[2], s[3])):
feat.setAttributes([i, date.strftime('%Y-%m-%d'),consis,data])
l_d.addFeatures([feat])
elif int(tipoEstacao) == 2:
l_d.addAttributes([QgsField('Date', QVariant.Date), QgsField('Consistencia', QVariant.Int), QgsField('Chuva',QVariant.Double)])
for i, (date, consis, data) in enumerate(zip(s[1], s[2], s[3])):
feat.setAttributes([i, date.strftime('%Y-%m-%d'),consis,data])
l_d.addFeatures([feat])
target_layer[0].updateExtents()
target_layer[0].commitChanges()
else:
pass
def download_station(self, code, typeData, folder_toDownload, lon, lat):
if int(typeData) == 1:
typeData = '3'
else:
pass
params = {'codEstacao': f'{int(code):08}', 'dataInicio': '', 'dataFim': '', 'tipoDados': '{}'.format(typeData), 'nivelConsistencia': ''}
response = requests.get(r'http://telemetriaws1.ana.gov.br/ServiceANA.asmx/HidroSerieHistorica', params)
# response = requests.get(r'http://telemetriaws1.ana.gov.br/ServiceANA.asmx?op=HidroSerieHistorica', params)
# print(code,response.status_code)
tree = ET.ElementTree(ET.fromstring(response.content))
root = tree.getroot()
list_data = []
list_consistenciaF = []
list_month_dates = []
lon = float(lon)
lat = float(lat)
for i in root.iter('SerieHistorica'):
codigo = i.find("EstacaoCodigo").text
consistencia = i.find("NivelConsistencia").text
date = i.find("DataHora").text
date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
last_day = calendar.monthrange(date.year, date.month)[1]
month_dates = [date + datetime.timedelta(days=i) for i in range(last_day)]
data = []
list_consistencia = []
for day in range(last_day):
if params['tipoDados'] == '3':
value = 'Vazao{:02}'.format(day+1)
try:
data.append(float(i.find(value).text))
list_consistencia.append(int(consistencia))
except TypeError:
data.append(i.find(value).text)
list_consistencia.append(int(consistencia))
except AttributeError:
data.append(None)
list_consistencia.append(int(consistencia))
if params['tipoDados'] == '2':
value = 'Chuva{:02}'.format(day+1)
try:
data.append(float(i.find(value).text))
list_consistencia.append(consistencia)
except TypeError:
data.append(i.find(value).text)
list_consistencia.append(consistencia)
except AttributeError:
data.append(None)
list_consistencia.append(consistencia)
list_data = list_data + data
list_consistenciaF = list_consistenciaF + list_consistencia
list_month_dates = list_month_dates + month_dates
if len(list_data) > 0:
rows = zip(list_month_dates,[lon for l in range(len(list_month_dates))],[lat for l in range(len(list_month_dates))], list_consistenciaF, list_data)
with open(os.path.join(folder_toDownload, f'{codigo}_{typeData}.csv'), 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(('Date','Longitude','Latitude', f'Consistencia_{codigo}_{typeData}', f'Data_{codigo}_{typeData}'))
for row in rows:
writer.writerow(row)
print('CSV gerado')
return (True, list_month_dates, list_consistenciaF, list_data)
else:
print('Dado insuficiente')
return (False, list_month_dates, list_consistenciaF, list_data)
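# Sketch of the XML shape this parser expects (inferred from the element names
# used above, not taken from ANA's documentation): one <SerieHistorica> block per
# month, e.g.
#
#   <SerieHistorica>
#     <EstacaoCodigo>12345678</EstacaoCodigo>
#     <NivelConsistencia>1</NivelConsistencia>
#     <DataHora>2020-01-01 00:00:00</DataHora>
#     <Vazao01>12.3</Vazao01> ... <Vazao31>10.8</Vazao31>  (Chuva01..Chuva31 for rain)
#   </SerieHistorica>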
def create_buffer_polygon(self, feat_geometry, distance, segments):
layers = list(QgsProject.instance().mapLayers().values())
layers_name = [l.name() for l in layers]
if not 'buffer_polygon' in layers_name:
lyr = QgsVectorLayer("polygon?crs=epsg:4326&field=id:integer", f"buffer_polygon", "memory")
QgsProject.instance().addMapLayer(lyr)
target_layer = QgsProject.instance().mapLayersByName('buffer_polygon')
target_layer[0].startEditing()
l_d = target_layer[0].dataProvider()
# feats = target_layer[0].getFeatures()
# for feat in feats:
# geom = feat.geometry()
feat = QgsFeature(target_layer[0].fields())
feat.setGeometry(feat_geometry.buffer(distance, segments))
l_d.addFeature(feat)
target_layer[0].updateExtents()
target_layer[0].commitChanges()
f = target_layer[0].getFeatures()
for l in f:
l_geometry = l.geometry()
return l_geometry
def inventario(self):
api_inventario = 'http://telemetriaws1.ana.gov.br/ServiceANA.asmx/HidroInventario'
params = {'codEstDE':'','codEstATE':'','tpEst':'','nmEst':'','nmRio':'','codSubBacia':'',
'codBacia':'','nmMunicipio':'','nmEstado':'','sgResp':'','sgOper':'','telemetrica':''}
self.dlg.progressBar_inventario.setValue(2)
response = requests.get(api_inventario, params)
self.dlg.progressBar_inventario.setValue(10)
tree = ET.ElementTree(ET.fromstring(response.content))
root = tree.getroot()
self.dlg.progressBar_inventario.setValue(15)
if os.path.isfile(os.path.join(self.dlg.file_widget.filePath(), f'inventario.csv')):
print('Arquivo inventario já existe')
self.dlg.progressBar_inventario.setValue(100)
else:
with open(os.path.join(self.dlg.file_widget.filePath(), f'inventario.csv'), 'w',newline='') as f:
writer = csv.writer(f)
writer.writerow(('Codigo', 'Latitude','Longitude','TipoEstacao'))
self.dlg.progressBar_inventario.setValue(20)
# print(len(root.findall('Codigo')))
total = len(list(root.iter('Table')))
j = 0
self.dlg.progressBar_inventario.setValue(25)
for i in root.iter('Table'):
                    row = (i.find('Codigo').text, i.find('Latitude').text,
                           i.find('Longitude').text, i.find('TipoEstacao').text)
                    print(*row)
                    writer.writerow(row)
                    j += 1
# self.dlg.progressBar_inventario.setValue(j/float(total)*100)
self.dlg.progressBar_inventario.setValue(100)
print('Arquivo inventario.csv criado')
self.dlg.inventario_path.setFilePath(os.path.join(self.dlg.file_widget.filePath(), 'inventario.csv'))
self.iface.messageBar().pushMessage('Success', 'Download do inventario.csv concluído!', level=Qgis.Success)
def check_errors(self):
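        """Check the dialog inputs before running: the inventario.csv path must be set
        and end in '.csv', and the map-layer selector must have a layer chosen; each
        problem pushes an error to the QGIS message bar and sets the error flag."""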
error = False
print(self.dlg.inventario_path.filePath()[-4:])
        if (self.dlg.inventario_path.filePath() is None
                or self.dlg.inventario_path.filePath() == ''
                or not self.dlg.inventario_path.filePath().endswith('.csv')):
print(self.dlg.inventario_path.filePath())
self.iface.messageBar().pushMessage("Error", "inventario.csv não encontrado", level=Qgis.Critical, duration=5)
error = True
        if self.dlg.mapLayer_box.currentLayer() is None:
m.c1036 = Constraint(expr=m.x1036*(8.00892516581441 + m.x2097) - m.x3474 == 0)
m.c1037 = Constraint(expr=m.x1037*(97.3173040663597 + m.x2098) - m.x3475 == 0)
m.c1038 = Constraint(expr=m.x1038*(58.8520674314227 + m.x2099) - m.x3476 == 0)
m.c1039 = Constraint(expr=m.x1039*(89.3791992560023 + m.x2100) - m.x3477 == 0)
m.c1040 = Constraint(expr=m.x1040*(177.636006160939 + m.x2101) - m.x3478 == 0)
m.c1041 = Constraint(expr=m.x1041*(373.780859160202 + m.x2102) - m.x3479 == 0)
m.c1042 = Constraint(expr=m.x1042*(67.92235 + m.x2003) - m.x3480 == 0)
m.c1043 = Constraint(expr=m.x1043*(17.52572 + m.x2004) - m.x3481 == 0)
m.c1044 = Constraint(expr=m.x1044*(215.1945909789 + m.x2007) - m.x3482 == 0)
m.c1045 = Constraint(expr=m.x1045*(82.3846155412095 + m.x2009) - m.x3483 == 0)
m.c1046 = Constraint(expr=m.x1046*(15.77529785051 + m.x2010) - m.x3484 == 0)
m.c1047 = Constraint(expr=m.x1047*(17.73824148824 + m.x2012) - m.x3485 == 0)
m.c1048 = Constraint(expr=m.x1048*(70.841638270004 + m.x2015) - m.x3486 == 0)
m.c1049 = Constraint(expr=m.x1049*(29.0537838075122 + m.x2019) - m.x3487 == 0)
m.c1050 = Constraint(expr=m.x1050*(11.179067059 + m.x2020) - m.x3488 == 0)
m.c1051 = Constraint(expr=m.x1051*(29.39924999665 + m.x2023) - m.x3489 == 0)
m.c1052 = Constraint(expr=m.x1052*(9.34536262823 + m.x2024) - m.x3490 == 0)
m.c1053 = Constraint(expr=m.x1053*(254.79773 + m.x2031) - m.x3491 == 0)
m.c1054 = Constraint(expr=m.x1054*(6.95367819652136 + m.x2091) - m.x3492 == 0)
m.c1055 = Constraint(expr=m.x1055*(68.611061605179 + m.x2092) - m.x3493 == 0)
m.c1056 = Constraint(expr=m.x1056*(149.982358690318 + m.x2093) - m.x3494 == 0)
m.c1057 = Constraint(expr=m.x1057*(175.844560388705 + m.x2094) - m.x3495 == 0)
m.c1058 = Constraint(expr=m.x1058*(10.1522671595645 + m.x2095) - m.x3496 == 0)
m.c1059 = Constraint(expr=m.x1059*(121.104830353398 + m.x2096) - m.x3497 == 0)
m.c1060 = Constraint(expr=m.x1060*(8.00892516581441 + m.x2097) - m.x3498 == 0)
m.c1061 = Constraint(expr=m.x1061*(97.3173040663597 + m.x2098) - m.x3499 == 0)
m.c1062 = Constraint(expr=m.x1062*(58.8520674314227 + m.x2099) - m.x3500 == 0)
m.c1063 = Constraint(expr=m.x1063*(89.3791992560023 + m.x2100) - m.x3501 == 0)
m.c1064 = Constraint(expr=m.x1064*(177.636006160939 + m.x2101) - m.x3502 == 0)
m.c1065 = Constraint(expr=m.x1065*(373.780859160202 + m.x2102) - m.x3503 == 0)
m.c1066 = Constraint(expr=m.x1066*(95.28044 + m.x2106) - m.x3504 == 0)
m.c1067 = Constraint(expr=m.x1067*(11.179067059 + m.x2020) - m.x3505 == 0)
m.c1068 = Constraint(expr=m.x1068*(29.39924999665 + m.x2023) - m.x3506 == 0)
m.c1069 = Constraint(expr=m.x1069*(9.34536262823 + m.x2024) - m.x3507 == 0)
m.c1070 = Constraint(expr=m.x1070*(6.95367819652136 + m.x2091) - m.x3508 == 0)
m.c1071 = Constraint(expr=m.x1071*(68.611061605179 + m.x2092) - m.x3509 == 0)
m.c1072 = Constraint(expr=m.x1072*(149.982358690318 + m.x2093) - m.x3510 == 0)
m.c1073 = Constraint(expr=m.x1073*(175.844560388705 + m.x2094) - m.x3511 == 0)
m.c1074 = Constraint(expr=m.x1074*(10.1522671595645 + m.x2095) - m.x3512 == 0)
m.c1075 = Constraint(expr=m.x1075*(121.104830353398 + m.x2096) - m.x3513 == 0)
m.c1076 = Constraint(expr=m.x1076*(8.00892516581441 + m.x2097) - m.x3514 == 0)
m.c1077 = Constraint(expr=m.x1077*(97.3173040663597 + m.x2098) - m.x3515 == 0)
m.c1078 = Constraint(expr=m.x1078*(58.8520674314227 + m.x2099) - m.x3516 == 0)
m.c1079 = Constraint(expr=m.x1079*(89.3791992560023 + m.x2100) - m.x3517 == 0)
m.c1080 = Constraint(expr=m.x1080*(177.636006160939 + m.x2101) - m.x3518 == 0)
m.c1081 = Constraint(expr=m.x1081*(373.780859160202 + m.x2102) - m.x3519 == 0)
m.c1082 = Constraint(expr=m.x1082*(95.28044 + m.x2106) - m.x3520 == 0)
m.c1083 = Constraint(expr=m.x1083*(85.0133724208126 + m.x1997) - m.x3521 == 0)
m.c1084 = Constraint(expr=m.x1084*(108.052970594387 + m.x1998) - m.x3522 == 0)
m.c1085 = Constraint(expr=m.x1085*(9.3711459385556 + m.x1999) - m.x3523 == 0)
m.c1086 = Constraint(expr=m.x1086*(10.69589 + m.x2000) - m.x3524 == 0)
m.c1087 = Constraint(expr=m.x1087*(17.932791400734 + m.x2001) - m.x3525 == 0)
m.c1088 = Constraint(expr=m.x1088*(88.805712423724 + m.x2002) - m.x3526 == 0)
m.c1089 = Constraint(expr=m.x1089*(67.92235 + m.x2003) - m.x3527 == 0)
m.c1090 = Constraint(expr=m.x1090*(17.52572 + m.x2004) - m.x3528 == 0)
m.c1091 = Constraint(expr=m.x1091*(75.509965 + m.x2005) - m.x3529 == 0)
m.c1092 = Constraint(expr=m.x1092*(68.860513 + m.x2006) - m.x3530 == 0)
m.c1093 = Constraint(expr=m.x1093*(215.1945909789 + m.x2007) - m.x3531 == 0)
m.c1094 = Constraint(expr=m.x1094*(17.9818975244236 + m.x2008) - m.x3532 == 0)
m.c1095 = Constraint(expr=m.x1095*(82.3846155412095 + m.x2009) - m.x3533 == 0)
m.c1096 = Constraint(expr=m.x1096*(15.77529785051 + m.x2010) - m.x3534 == 0)
m.c1097 = Constraint(expr=m.x1097*(20.585074453376 + m.x2011) - m.x3535 == 0)
m.c1098 = Constraint(expr=m.x1098*(17.73824148824 + m.x2012) - m.x3536 == 0)
m.c1099 = Constraint(expr=m.x1099*(9.7831921864888 + m.x2013) - m.x3537 == 0)
m.c1100 = Constraint(expr=m.x1100*(58.3304919073372 + m.x2014) - m.x3538 == 0)
m.c1101 = Constraint(expr=m.x1101*(70.841638270004 + m.x2015) - m.x3539 == 0)
m.c1102 = Constraint(expr=m.x1102*(2.457537796 + m.x2016) - m.x3540 == 0)
m.c1103 = Constraint(expr=m.x1103*(12.908328297966 + m.x2017) - m.x3541 == 0)
m.c1104 = Constraint(expr=m.x1104*(25.5807469993058 + m.x2018) - m.x3542 == 0)
m.c1105 = Constraint(expr=m.x1105*(29.0537838075122 + m.x2019) - m.x3543 == 0)
m.c1106 = Constraint(expr=m.x1106*(11.179067059 + m.x2020) - m.x3544 == 0)
m.c1107 = Constraint(expr=m.x1107*(16.47769975 + m.x2021) - m.x3545 == 0)
m.c1108 = Constraint(expr=m.x1108*(10.8297732214437 + m.x2022) - m.x3546 == 0)
m.c1109 = Constraint(expr=m.x1109*(29.39924999665 + m.x2023) - m.x3547 == 0)
m.c1110 = Constraint(expr=m.x1110*(9.34536262823 + m.x2024) - m.x3548 == 0)
m.c1111 = Constraint(expr=m.x1111*(27.47191645805 + m.x2028) - m.x3549 == 0)
m.c1112 = Constraint(expr=m.x1112*(40.593786 + m.x2029) - m.x3550 == 0)
m.c1113 = Constraint(expr=m.x1113*(277.48319 + m.x2030) - m.x3551 == 0)
m.c1114 = Constraint(expr=m.x1114*(254.79773 + m.x2031) - m.x3552 == 0)
m.c1115 = Constraint(expr=m.x1115*(117.202966 + m.x2032) - m.x3553 == 0)
m.c1116 = Constraint(expr=m.x1116*(20.035404 + m.x2033) - m.x3554 == 0)
m.c1117 = Constraint(expr=m.x1117*(32.373595 + m.x2034) - m.x3555 == 0)
m.c1118 = Constraint(expr=m.x1118*(46.195028 + m.x2035) - m.x3556 == 0)
m.c1119 = Constraint(expr=m.x1119*(118.743516912 + m.x2036) - m.x3557 == 0)
m.c1120 = Constraint(expr=m.x1120*(22.880176696 + m.x2038) - m.x3558 == 0)
m.c1121 = Constraint(expr=m.x1121*(6.95367819652136 + m.x2091) - m.x3559 == 0)
m.c1122 = Constraint(expr=m.x1122*(68.611061605179 + m.x2092) - m.x3560 == 0)
m.c1123 = Constraint(expr=m.x1123*(149.982358690318 + m.x2093) - m.x3561 == 0)
m.c1124 = Constraint(expr=m.x1124*(175.844560388705 + m.x2094) - m.x3562 == 0)
m.c1125 = Constraint(expr=m.x1125*(10.1522671595645 + m.x2095) - m.x3563 == 0)
m.c1126 = Constraint(expr=m.x1126*(121.104830353398 + m.x2096) - m.x3564 == 0)
m.c1127 = Constraint(expr=m.x1127*(8.00892516581441 + m.x2097) - m.x3565 == 0)
m.c1128 = Constraint(expr=m.x1128*(97.3173040663597 + m.x2098) - m.x3566 == 0)
m.c1129 = Constraint(expr=m.x1129*(58.8520674314227 + m.x2099) - m.x3567 == 0)
m.c1130 = Constraint(expr=m.x1130*(89.3791992560023 + m.x2100) - m.x3568 == 0)
m.c1131 = Constraint(expr=m.x1131*(177.636006160939 + m.x2101) - m.x3569 == 0)
m.c1132 = Constraint(expr=m.x1132*(373.780859160202 + m.x2102) - m.x3570 == 0)
m.c1133 = Constraint(expr=m.x1133*(95.28044 + m.x2106) - m.x3571 == 0)
m.c1134 = Constraint(expr=m.x1134*(9.3711459385556 + m.x1999) - m.x3572 == 0)
m.c1135 = Constraint(expr=m.x1135*(10.69589 + m.x2000) - m.x3573 == 0)
m.c1136 = Constraint(expr=m.x1136*(17.932791400734 + m.x2001) - m.x3574 == 0)
m.c1137 = Constraint(expr=m.x1137*(88.805712423724 + m.x2002) - m.x3575 == 0)
m.c1138 = Constraint(expr=m.x1138*(67.92235 + m.x2003) - m.x3576 == 0)
m.c1139 = Constraint(expr=m.x1139*(17.52572 + m.x2004) - m.x3577 == 0)
m.c1140 = Constraint(expr=m.x1140*(75.509965 + m.x2005) - m.x3578 == 0)
m.c1141 = Constraint(expr=m.x1141*(68.860513 + m.x2006) - m.x3579 == 0)
m.c1142 = Constraint(expr=m.x1142*(215.1945909789 + m.x2007) - m.x3580 == 0)
m.c1143 = Constraint(expr=m.x1143*(17.9818975244236 + m.x2008) - m.x3581 == 0)
m.c1144 = Constraint(expr=m.x1144*(82.3846155412095 + m.x2009) - m.x3582 == 0)
m.c1145 = Constraint(expr=m.x1145*(15.77529785051 + m.x2010) - m.x3583 == 0)
m.c1146 = Constraint(expr=m.x1146*(20.585074453376 + m.x2011) - m.x3584 == 0)
m.c1147 = Constraint(expr=m.x1147*(17.73824148824 + m.x2012) - m.x3585 == 0)
m.c1148 = Constraint(expr=m.x1148*(9.7831921864888 + m.x2013) - m.x3586 == 0)
m.c1149 = Constraint(expr=m.x1149*(58.3304919073372 + m.x2014) - m.x3587 == 0)
m.c1150 = Constraint(expr=m.x1150*(70.841638270004 + m.x2015) - m.x3588 == 0)
m.c1151 = Constraint(expr=m.x1151*(2.457537796 + m.x2016) - m.x3589 == 0)
m.c1152 = Constraint(expr=m.x1152*(12.908328297966 + m.x2017) - m.x3590 == 0)
m.c1153 = Constraint(expr=m.x1153*(25.5807469993058 + m.x2018) - m.x3591 == 0)
m.c1154 = Constraint(expr=m.x1154*(29.0537838075122 + m.x2019) - m.x3592 == 0)
m.c1155 = Constraint(expr=m.x1155*(11.179067059 + m.x2020) - m.x3593 == 0)
m.c1156 = Constraint(expr=m.x1156*(16.47769975 + m.x2021) - m.x3594 == 0)
m.c1157 = Constraint(expr=m.x1157*(10.8297732214437 + m.x2022) - m.x3595 == 0)
m.c1158 = Constraint(expr=m.x1158*(29.39924999665 + m.x2023) - m.x3596 == 0)
m.c1159 = Constraint(expr=m.x1159*(9.34536262823 + m.x2024) - m.x3597 == 0)
m.c1160 = Constraint(expr=m.x1160*(17.3365643030813 + m.x2025) - m.x3598 == 0)
m.c1161 = Constraint(expr=m.x1161*(48.547749096 + m.x2026) - m.x3599 == 0)
m.c1162 = Constraint(expr=m.x1162*(27.47191645805 + m.x2028) - m.x3600 == 0)
m.c1163 = Constraint(expr=m.x1163*(40.593786 + m.x2029) - m.x3601 == 0)
m.c1164 = Constraint(expr=m.x1164*(277.48319 + m.x2030) - m.x3602 == 0)
m.c1165 = Constraint(expr=m.x1165*(254.79773 + m.x2031) - m.x3603 == 0)
m.c1166 = Constraint(expr=m.x1166*(117.202966 + m.x2032) - m.x3604 == 0)
m.c1167 = Constraint(expr=m.x1167*(20.035404 + m.x2033) - m.x3605 == 0)
m.c1168 = Constraint(expr=m.x1168*(32.373595 + m.x2034) - m.x3606 == 0)
m.c1169 = Constraint(expr=m.x1169*(46.195028 + m.x2035) - m.x3607 == 0)
m.c1170 = Constraint(expr=m.x1170*(118.743516912 + m.x2036) - m.x3608 == 0)
m.c1171 = Constraint(expr=m.x1171*(22.880176696 + m.x2038) - m.x3609 == 0)
m.c1172 = Constraint(expr=m.x1172*(10.749094 + m.x2039) - m.x3610 == 0)
m.c1173 = Constraint(expr=m.x1173*(6.95367819652136 + m.x2091) - m.x3611 == 0)
m.c1174 = Constraint(expr=m.x1174*(68.611061605179 + m.x2092) - m.x3612 == 0)
m.c1175 = Constraint(expr=m.x1175*(149.982358690318 + m.x2093) - m.x3613 == 0)
m.c1176 = Constraint(expr=m.x1176*(175.844560388705 + m.x2094) - m.x3614 == 0)
m.c1177 = Constraint(expr=m.x1177*(10.1522671595645 + m.x2095) - m.x3615 == 0)
m.c1178 = Constraint(expr=m.x1178*(121.104830353398 + m.x2096) - m.x3616 == 0)
m.c1179 = Constraint(expr=m.x1179*(8.00892516581441 + m.x2097) - m.x3617 == 0)
m.c1180 = Constraint(expr=m.x1180*(97.3173040663597 + m.x2098) - m.x3618 == 0)
m.c1181 = Constraint(expr=m.x1181*(58.8520674314227 + m.x2099) - m.x3619 == 0)
m.c1182 = Constraint(expr=m.x1182*(89.3791992560023 + m.x2100) - m.x3620 == 0)
m.c1183 = Constraint(expr=m.x1183*(177.636006160939 + m.x2101) - m.x3621 == 0)
m.c1184 = Constraint(expr=m.x1184*(373.780859160202 + m.x2102) - m.x3622 == 0)
m.c1185 = Constraint(expr=m.x1185*(85.0133724208126 + m.x1997) - m.x3623 == 0)
m.c1186 = Constraint(expr=m.x1186*(108.052970594387 + m.x1998) - m.x3624 == 0)
m.c1187 = Constraint(expr=m.x1187*(9.3711459385556 + m.x1999) - m.x3625 == 0)
m.c1188 = Constraint(expr=m.x1188*(10.69589 + m.x2000) - m.x3626 == 0)
m.c1189 = Constraint(expr=m.x1189*(17.932791400734 + m.x2001) - m.x3627 == 0)
m.c1190 = Constraint(expr=m.x1190*(88.805712423724 + m.x2002) - m.x3628 == 0)
m.c1191 = Constraint(expr=m.x1191*(67.92235 + m.x2003) - m.x3629 == 0)
m.c1192 = Constraint(expr=m.x1192*(17.52572 + m.x2004) - m.x3630 == 0)
m.c1193 = Constraint(expr=m.x1193*(75.509965 + m.x2005) - m.x3631 == 0)
m.c1194 = Constraint(expr=m.x1194*(68.860513 + m.x2006) - m.x3632 == 0)
m.c1195 = Constraint(expr=m.x1195*(277.48319 + m.x2030) - m.x3633 == 0)
m.c1196 =